Dataset columns (name, dtype, observed range):

column                    type           values
code                      string         lengths 87-55.2k
code_codestyle            int64          0-349
style_context             string         lengths 135-49.1k
style_context_codestyle   int64          0-349
label                     int64          0-1
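Each row pairs a Python source string (`code`) with a second source string (`style_context`), an integer style-class id for each, and a binary `label`. A minimal sketch of reading a row with the `datasets` library, assuming the rows are published as a Hub dataset; the repo id below is a hypothetical placeholder, and the comments only restate the schema above.

    from datasets import load_dataset

    # hypothetical repo id -- substitute the dataset's real name
    ds = load_dataset("user/code-style-pairs", split="train")

    row = ds[0]
    print(len(row["code"]))                # string, 87 to 55.2k characters per the schema
    print(row["code_codestyle"])           # int64 style-class id in [0, 349]
    print(row["style_context_codestyle"])  # int64 style-class id in [0, 349]
    print(row["label"])                    # int64 binary target, 0 or 1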
code:
'''simple docstring'''

_UpperCamelCase : Dict = '0.21.0'

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
code_codestyle: 304
style_context:
'''simple docstring'''

_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
style_context_codestyle: 304
label: 1
code:
'''simple docstring'''

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging

logging.set_verbosity_info()
_UpperCamelCase : int = logging.get_logger(__name__)


def __UpperCAmelCase ( A : Any , A : Optional[int]=False , A : Union[str, Any]=False ) -> Any:
    UpperCAmelCase_ : Optional[int] = '''backbone.''' if is_semantic else ''''''
    UpperCAmelCase_ : List[str] = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append(
            (F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
            (F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
            (F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
            (F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
        ] )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ('''mask_token''', '''beit.embeddings.mask_token'''),
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
                ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ] )

    return rename_keys


def __UpperCAmelCase ( A : str , A : Union[str, Any] , A : Optional[Any]=False , A : Tuple=False ) -> Dict:
    for i in range(config.num_hidden_layers ):
        UpperCAmelCase_ : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
        # queries, keys and values
        UpperCAmelCase_ : Dict = state_dict.pop(F"{prefix}blocks.{i}.attn.qkv.weight" )
        UpperCAmelCase_ : str = state_dict.pop(F"{prefix}blocks.{i}.attn.q_bias" )
        UpperCAmelCase_ : Optional[int] = state_dict.pop(F"{prefix}blocks.{i}.attn.v_bias" )

        UpperCAmelCase_ : Any = in_proj_weight[
            : config.hidden_size, :
        ]
        UpperCAmelCase_ : Any = q_bias
        UpperCAmelCase_ : str = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        UpperCAmelCase_ : str = in_proj_weight[
            -config.hidden_size :, :
        ]
        UpperCAmelCase_ : int = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        UpperCAmelCase_ : Dict = state_dict.pop(F"{prefix}blocks.{i}.gamma_1" )
        UpperCAmelCase_ : List[str] = state_dict.pop(F"{prefix}blocks.{i}.gamma_2" )
        UpperCAmelCase_ : Optional[Any] = gamma_a
        UpperCAmelCase_ : Optional[Any] = gamma_a


def __UpperCAmelCase ( A : Dict , A : int , A : Union[str, Any] ) -> List[str]:
    UpperCAmelCase_ : Union[str, Any] = dct.pop(A )
    UpperCAmelCase_ : str = val


def __UpperCAmelCase ( ) -> str:
    UpperCAmelCase_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    UpperCAmelCase_ : Dict = Image.open(requests.get(A , stream=A ).raw )
    return im


@torch.no_grad()
def __UpperCAmelCase ( A : List[str] , A : int , A : Tuple=False ) -> Tuple:
    UpperCAmelCase_ : Union[str, Any] = False if '''rvlcdip''' in checkpoint_url else True
    UpperCAmelCase_ : List[str] = BeitConfig(use_absolute_position_embeddings=A , use_mask_token=A )

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        UpperCAmelCase_ : Union[str, Any] = 1_0_2_4
        UpperCAmelCase_ : List[str] = 4_0_9_6
        UpperCAmelCase_ : Union[str, Any] = 2_4
        UpperCAmelCase_ : List[str] = 1_6

    # labels
    if "rvlcdip" in checkpoint_url:
        UpperCAmelCase_ : Optional[int] = 1_6
        UpperCAmelCase_ : Optional[Any] = '''huggingface/label-files'''
        UpperCAmelCase_ : str = '''rvlcdip-id2label.json'''
        UpperCAmelCase_ : Union[str, Any] = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
        UpperCAmelCase_ : Optional[Any] = {int(A ): v for k, v in idalabel.items()}
        UpperCAmelCase_ : int = idalabel
        UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''model''']

    UpperCAmelCase_ : str = create_rename_keys(A , has_lm_head=A )
    for src, dest in rename_keys:
        rename_key(A , A , A )
    read_in_q_k_v(A , A , has_lm_head=A )

    # load HuggingFace model
    UpperCAmelCase_ : Optional[Any] = BeitForMaskedImageModeling(A ) if has_lm_head else BeitForImageClassification(A )
    model.eval()
    model.load_state_dict(A )

    # Check outputs on an image
    UpperCAmelCase_ : Tuple = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=A )
    UpperCAmelCase_ : str = prepare_img()

    UpperCAmelCase_ : List[Any] = image_processor(images=A , return_tensors='''pt''' )
    UpperCAmelCase_ : Tuple = encoding['''pixel_values''']

    UpperCAmelCase_ : str = model(A )
    UpperCAmelCase_ : List[Any] = outputs.logits

    # verify logits
    UpperCAmelCase_ : Dict = [1, 1_6] if '''rvlcdip''' in checkpoint_url else [1, 1_9_6, 8_1_9_2]
    assert logits.shape == torch.Size(A ), "Shape of logits not as expected"

    Path(A ).mkdir(exist_ok=A )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(A )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(A )

    if push_to_hub:
        if has_lm_head:
            UpperCAmelCase_ : str = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
        else:
            UpperCAmelCase_ : Optional[int] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
        image_processor.push_to_hub(
            repo_path_or_name=Path(A , A ) ,
            organization='''nielsr''' ,
            commit_message='''Add image processor''' ,
            use_temp_dir=A ,
        )
        model.push_to_hub(
            repo_path_or_name=Path(A , A ) ,
            organization='''nielsr''' ,
            commit_message='''Add model''' ,
            use_temp_dir=A ,
        )


if __name__ == "__main__":
    _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )

    _UpperCamelCase : List[Any] = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 304
style_context:
'''simple docstring'''

import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
    if isinstance(A , A ):
        UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
    else:
        UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )

    for i, tensor in enumerate(A ):
        if padding_side == "right":
            if isinstance(A , A ):
                UpperCAmelCase_ : Tuple = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : Dict = tensor[:sequence_length]
        else:
            if isinstance(A , A ):
                UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : int = tensor[:sequence_length]

    return out_tensor.tolist()


def __UpperCAmelCase ( A : List[Any] ) -> str:
    UpperCAmelCase_ : Dict = ord(A )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class snake_case__ ( UpperCamelCase):
    a_ = 42
    a_ = True
    a_ = None
    a_ = None
    a_ = -100
    a_ = "pt"

    def A ( self : List[Any] , _A : Dict ) -> Tuple:
        import torch

        UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        UpperCAmelCase_ : Tuple = self.tokenizer.pad(
            _A ,
            padding=self.padding ,
            max_length=self.max_length ,
            pad_to_multiple_of=self.pad_to_multiple_of ,
            return_tensors='''pt''' if labels is None else None ,
        )

        if labels is None:
            return batch

        UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
        UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
        if padding_side == "right":
            UpperCAmelCase_ : Optional[Any] = [
                list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
            ]
        else:
            UpperCAmelCase_ : Any = [
                [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
            ]

        UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
        UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
        UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
        UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
        UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}

        return batch
style_context_codestyle: 304
label: 1
code:
'''simple docstring'''

def __UpperCAmelCase ( A : str ) -> Optional[Any]:
    UpperCAmelCase_ : Optional[Any] = [0] * len(A )
    UpperCAmelCase_ : Any = []
    UpperCAmelCase_ : Optional[int] = [1] * len(A )

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(A ) ):
        if indegree[i] == 0:
            queue.append(A )

    while queue:
        UpperCAmelCase_ : Any = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                UpperCAmelCase_ : Union[str, Any] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(A )

    print(max(A ) )


# Adjacency list of Graph
_UpperCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
code_codestyle: 304
style_context:
'''simple docstring'''

import functools


def __UpperCAmelCase ( A : str , A : str ) -> int:
    UpperCAmelCase_ : Optional[Any] = len(A )
    UpperCAmelCase_ : List[str] = len(A )

    @functools.cache
    def min_distance(A : int , A : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , A ) ,
            1 + min_distance(A , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 304
label: 1
code:
'''simple docstring'''

import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask

_UpperCamelCase : int = logging.getLogger(__name__)


class snake_case__ ( UpperCamelCase):
    a_ = "token-classification"

    def __init__( self : Optional[int] , _A : Union[str, Any] ) -> Union[str, Any]:
        if type(_A ) == dict:
            UpperCAmelCase_ : str = Namespace(**_A )
        UpperCAmelCase_ : Optional[Any] = import_module('''tasks''' )
        try:
            UpperCAmelCase_ : Any = getattr(_A , hparams.task_type )
            UpperCAmelCase_ : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
        UpperCAmelCase_ : Union[str, Any] = self.token_classification_task.get_labels(hparams.labels )
        UpperCAmelCase_ : Tuple = CrossEntropyLoss().ignore_index
        super().__init__(_A , len(self.labels ) , self.mode )

    def A ( self : Optional[int] , **_A : Union[str, Any] ) -> Any:
        return self.model(**_A )

    def A ( self : Any , _A : int , _A : Tuple ) -> Any:
        UpperCAmelCase_ : int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            UpperCAmelCase_ : List[Any] = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        UpperCAmelCase_ : Union[str, Any] = self(**_A )
        UpperCAmelCase_ : int = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def A ( self : Dict ) -> Tuple:
        UpperCAmelCase_ : str = self.hparams
        for mode in ["train", "dev", "test"]:
            UpperCAmelCase_ : List[Any] = self._feature_file(_A )
            if os.path.exists(_A ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , _A )
                UpperCAmelCase_ : Dict = torch.load(_A )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                UpperCAmelCase_ : Any = self.token_classification_task.read_examples_from_file(args.data_dir , _A )
                UpperCAmelCase_ : List[Any] = self.token_classification_task.convert_examples_to_features(
                    _A ,
                    self.labels ,
                    args.max_seq_length ,
                    self.tokenizer ,
                    cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) ,
                    cls_token=self.tokenizer.cls_token ,
                    cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 ,
                    sep_token=self.tokenizer.sep_token ,
                    sep_token_extra=_A ,
                    pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) ,
                    pad_token=self.tokenizer.pad_token_id ,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id ,
                    pad_token_label_id=self.pad_token_label_id ,
                )
                logger.info('''Saving features into cached file %s''' , _A )
                torch.save(_A , _A )

    def A ( self : Dict , _A : int , _A : int , _A : bool = False ) -> DataLoader:
        UpperCAmelCase_ : List[str] = self._feature_file(_A )
        logger.info('''Loading features from cached file %s''' , _A )
        UpperCAmelCase_ : str = torch.load(_A )
        UpperCAmelCase_ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        UpperCAmelCase_ : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            UpperCAmelCase_ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            UpperCAmelCase_ : Any = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        UpperCAmelCase_ : Optional[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(_A , _A , _A , _A ) , batch_size=_A )

    def A ( self : Union[str, Any] , _A : Optional[int] , _A : str ) -> List[Any]:
        """Compute validation"""
        UpperCAmelCase_ : List[Any] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
        if self.config.model_type != "distilbert":
            UpperCAmelCase_ : str = (
                batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        UpperCAmelCase_ : str = self(**_A )
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = outputs[:2]
        UpperCAmelCase_ : List[Any] = logits.detach().cpu().numpy()
        UpperCAmelCase_ : Optional[int] = inputs['''labels'''].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def A ( self : Optional[Any] , _A : str ) -> List[str]:
        UpperCAmelCase_ : str = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
        UpperCAmelCase_ : Tuple = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
        UpperCAmelCase_ : Tuple = np.argmax(_A , axis=2 )
        UpperCAmelCase_ : List[str] = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        UpperCAmelCase_ : Dict = dict(enumerate(self.labels ) )
        UpperCAmelCase_ : Optional[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        UpperCAmelCase_ : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        UpperCAmelCase_ : str = {
            '''val_loss''': val_loss_mean,
            '''accuracy_score''': accuracy_score(_A , _A ),
            '''precision''': precision_score(_A , _A ),
            '''recall''': recall_score(_A , _A ),
            '''f1''': fa_score(_A , _A ),
        }
        UpperCAmelCase_ : List[str] = dict(results.items() )
        UpperCAmelCase_ : Optional[Any] = results
        return ret, preds_list, out_label_list

    def A ( self : List[Any] , _A : List[Any] ) -> List[str]:
        # when stable
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = self._eval_end(_A )
        UpperCAmelCase_ : str = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def A ( self : List[Any] , _A : List[Any] ) -> Union[str, Any]:
        # updating to test_epoch_end instead of deprecated test_end
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self._eval_end(_A )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        UpperCAmelCase_ : int = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def A ( _A : Tuple , _A : Dict ) -> Optional[int]:
        # Add NER specific options
        BaseTransformer.add_model_specific_args(_A , _A )
        parser.add_argument(
            '''--task_type''' , default='''NER''' , type=_A , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
        parser.add_argument(
            '''--max_seq_length''' ,
            default=1_28 ,
            type=_A ,
            help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) ,
        )
        parser.add_argument(
            '''--labels''' ,
            default='''''' ,
            type=_A ,
            help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' ,
        )
        parser.add_argument(
            '''--gpus''' ,
            default=0 ,
            type=_A ,
            help='''The number of GPUs allocated for this, it is by default 0 meaning none''' ,
        )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets'''
        )
        return parser


if __name__ == "__main__":
    _UpperCamelCase : List[str] = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    _UpperCamelCase : Optional[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
    _UpperCamelCase : Union[str, Any] = parser.parse_args()
    _UpperCamelCase : Union[str, Any] = NERTransformer(args)
    _UpperCamelCase : str = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        _UpperCamelCase : Any = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        _UpperCamelCase : str = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
code_codestyle: 304
style_context:
'''simple docstring'''

def __UpperCAmelCase ( A : int = 1_0_0_0 ) -> int:
    UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 1, 1
    UpperCAmelCase_ : Dict = []
    for i in range(1 , n + 1 ):
        UpperCAmelCase_ : Optional[int] = prev_numerator + 2 * prev_denominator
        UpperCAmelCase_ : Tuple = prev_numerator + prev_denominator
        if len(str(A ) ) > len(str(A ) ):
            result.append(A )
        UpperCAmelCase_ : Optional[Any] = numerator
        UpperCAmelCase_ : Optional[int] = denominator
    return len(A )


if __name__ == "__main__":
    print(f'''{solution() = }''')
style_context_codestyle: 304
label: 1
code:
'''simple docstring'''

import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def __UpperCAmelCase ( A : int ) -> List[str]:
    UpperCAmelCase_ : Optional[Any] = SwinConfig()
    UpperCAmelCase_ : List[str] = swin_name.split('''_''' )

    UpperCAmelCase_ : Optional[Any] = name_split[1]
    UpperCAmelCase_ : Optional[Any] = int(name_split[4] )
    UpperCAmelCase_ : Tuple = int(name_split[3][-1] )

    if model_size == "tiny":
        UpperCAmelCase_ : List[Any] = 9_6
        UpperCAmelCase_ : Any = (2, 2, 6, 2)
        UpperCAmelCase_ : Optional[int] = (3, 6, 1_2, 2_4)
    elif model_size == "small":
        UpperCAmelCase_ : str = 9_6
        UpperCAmelCase_ : Union[str, Any] = (2, 2, 1_8, 2)
        UpperCAmelCase_ : str = (3, 6, 1_2, 2_4)
    elif model_size == "base":
        UpperCAmelCase_ : Dict = 1_2_8
        UpperCAmelCase_ : Union[str, Any] = (2, 2, 1_8, 2)
        UpperCAmelCase_ : List[Any] = (4, 8, 1_6, 3_2)
    else:
        UpperCAmelCase_ : Optional[Any] = 1_9_2
        UpperCAmelCase_ : Dict = (2, 2, 1_8, 2)
        UpperCAmelCase_ : List[Any] = (6, 1_2, 2_4, 4_8)

    if "in22k" in swin_name:
        UpperCAmelCase_ : int = 2_1_8_4_1
    else:
        UpperCAmelCase_ : Optional[Any] = 1_0_0_0
        UpperCAmelCase_ : Optional[int] = '''huggingface/label-files'''
        UpperCAmelCase_ : int = '''imagenet-1k-id2label.json'''
        UpperCAmelCase_ : Tuple = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
        UpperCAmelCase_ : Union[str, Any] = {int(A ): v for k, v in idalabel.items()}
        UpperCAmelCase_ : Dict = idalabel
        UpperCAmelCase_ : Dict = {v: k for k, v in idalabel.items()}

    UpperCAmelCase_ : Optional[int] = img_size
    UpperCAmelCase_ : List[str] = num_classes
    UpperCAmelCase_ : Tuple = embed_dim
    UpperCAmelCase_ : Optional[int] = depths
    UpperCAmelCase_ : Any = num_heads
    UpperCAmelCase_ : Any = window_size

    return config


def __UpperCAmelCase ( A : Dict ) -> List[Any]:
    if "patch_embed.proj" in name:
        UpperCAmelCase_ : List[Any] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        UpperCAmelCase_ : int = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        UpperCAmelCase_ : Union[str, Any] = '''encoder.''' + name
    if "attn.proj" in name:
        UpperCAmelCase_ : Dict = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        UpperCAmelCase_ : List[str] = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        UpperCAmelCase_ : List[str] = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        UpperCAmelCase_ : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        UpperCAmelCase_ : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        UpperCAmelCase_ : str = name.replace('''mlp.fc2''' , '''output.dense''' )

    if name == "norm.weight":
        UpperCAmelCase_ : Dict = '''layernorm.weight'''
    if name == "norm.bias":
        UpperCAmelCase_ : Union[str, Any] = '''layernorm.bias'''

    if "head" in name:
        UpperCAmelCase_ : List[Any] = name.replace('''head''' , '''classifier''' )
    else:
        UpperCAmelCase_ : str = '''swin.''' + name

    return name


def __UpperCAmelCase ( A : Any , A : str ) -> Union[str, Any]:
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase_ : Any = orig_state_dict.pop(A )

        if "mask" in key:
            continue
        elif "qkv" in key:
            UpperCAmelCase_ : str = key.split('''.''' )
            UpperCAmelCase_ : Tuple = int(key_split[1] )
            UpperCAmelCase_ : List[Any] = int(key_split[3] )
            UpperCAmelCase_ : str = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                UpperCAmelCase_ : List[str] = val[:dim, :]
                UpperCAmelCase_ : Dict = val[
                    dim : dim * 2, :
                ]
                UpperCAmelCase_ : Dict = val[-dim:, :]
            else:
                UpperCAmelCase_ : Any = val[
                    :dim
                ]
                UpperCAmelCase_ : Dict = val[
                    dim : dim * 2
                ]
                UpperCAmelCase_ : Optional[Any] = val[
                    -dim:
                ]
        else:
            UpperCAmelCase_ : Optional[Any] = val

    return orig_state_dict


def __UpperCAmelCase ( A : Optional[Any] , A : Optional[Any] ) -> List[str]:
    UpperCAmelCase_ : Dict = timm.create_model(A , pretrained=A )
    timm_model.eval()

    UpperCAmelCase_ : Any = get_swin_config(A )
    UpperCAmelCase_ : str = SwinForImageClassification(A )
    model.eval()

    UpperCAmelCase_ : str = convert_state_dict(timm_model.state_dict() , A )
    model.load_state_dict(A )

    UpperCAmelCase_ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    UpperCAmelCase_ : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
    UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(A , stream=A ).raw )
    UpperCAmelCase_ : Dict = image_processor(images=A , return_tensors='''pt''' )

    UpperCAmelCase_ : str = timm_model(inputs['''pixel_values'''] )
    UpperCAmelCase_ : str = model(**A ).logits

    assert torch.allclose(A , A , atol=1e-3 )

    print(F"Saving model {swin_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(A )

    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(A )


if __name__ == "__main__":
    _UpperCamelCase : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--swin_name',
        default='swin_tiny_patch4_window7_224',
        type=str,
        help='Name of the Swin timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )

    _UpperCamelCase : List[Any] = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
code_codestyle: 304
style_context:
'''simple docstring'''

import unittest

import numpy as np

from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BeitImageProcessor


class snake_case__ ( unittest.TestCase):
    def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]:
        UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20}
        UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        UpperCAmelCase_ : Tuple = parent
        UpperCAmelCase_ : Optional[int] = batch_size
        UpperCAmelCase_ : Any = num_channels
        UpperCAmelCase_ : Optional[Any] = image_size
        UpperCAmelCase_ : Tuple = min_resolution
        UpperCAmelCase_ : Tuple = max_resolution
        UpperCAmelCase_ : Optional[int] = do_resize
        UpperCAmelCase_ : Tuple = size
        UpperCAmelCase_ : Optional[Any] = do_center_crop
        UpperCAmelCase_ : Optional[int] = crop_size
        UpperCAmelCase_ : Tuple = do_normalize
        UpperCAmelCase_ : Optional[Any] = image_mean
        UpperCAmelCase_ : int = image_std
        UpperCAmelCase_ : List[Any] = do_reduce_labels

    def A ( self : Union[str, Any] ) -> str:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def __UpperCAmelCase ( ) -> Optional[Any]:
    UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] )
    UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] )
    return image, map


def __UpperCAmelCase ( ) -> Any:
    UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
    UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] )
    UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] )
    UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] )
    UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] )
    return [imagea, imagea], [mapa, mapa]


@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    a_ = BeitImageProcessor if is_vision_available() else None

    def A ( self : Optional[Any] ) -> Union[str, Any]:
        UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self )

    @property
    def A ( self : List[Any] ) -> Tuple:
        return self.image_processor_tester.prepare_image_processor_dict()

    def A ( self : List[Any] ) -> Optional[Any]:
        UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''do_resize''' ) )
        self.assertTrue(hasattr(_A , '''size''' ) )
        self.assertTrue(hasattr(_A , '''do_center_crop''' ) )
        self.assertTrue(hasattr(_A , '''center_crop''' ) )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''image_mean''' ) )
        self.assertTrue(hasattr(_A , '''image_std''' ) )

    def A ( self : List[str] ) -> Optional[int]:
        UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , _A )

        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , _A )

    def A ( self : Optional[Any] ) -> Any:
        pass

    def A ( self : List[str] ) -> Optional[int]:
        # Initialize image_processing
        UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )

        # Test not batched input
        UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def A ( self : Union[str, Any] ) -> Union[str, Any]:
        # Initialize image_processing
        UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )

        # Test not batched input
        UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def A ( self : Optional[int] ) -> str:
        # Initialize image_processing
        UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

        # Test batched
        UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )

    def A ( self : Any ) -> Optional[Any]:
        # Initialize image_processing
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
        UpperCAmelCase_ : Union[str, Any] = []
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )

        # Test not batched input
        UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )

        # Test batched
        UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )

        # Test not batched input (PIL images)
        UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
        UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )

        # Test batched input (PIL images)
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs()
        UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape ,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(
            encoding['''labels'''].shape ,
            (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) ,
        )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )

    def A ( self : List[Any] ) -> Union[str, Any]:
        # Initialize image_processing
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs()
        UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )

        UpperCAmelCase_ : int = True
        UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
style_context_codestyle: 304
label: 1
code:
'''simple docstring'''

import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_abit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)

if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy

_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)


def __UpperCAmelCase ( A : torch.nn.Module , A : BnbQuantizationConfig , A : Union[str, os.PathLike] = None , A : Optional[Dict[str, Union[int, str, torch.device]]] = None , A : Optional[List[str]] = None , A : Optional[Dict[Union[int, str], Union[int, str]]] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , ) -> Any:
    UpperCAmelCase_ : List[str] = bnb_quantization_config.load_in_abit
    UpperCAmelCase_ : List[str] = bnb_quantization_config.load_in_abit

    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )

    UpperCAmelCase_ : str = []
    # custom device map
    if isinstance(A , A ) and len(device_map.keys() ) > 1:
        UpperCAmelCase_ : Optional[int] = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        UpperCAmelCase_ : Optional[int] = get_keys_to_not_convert(A )
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_abit:
            bnb_quantization_config.skip_modules.extend(A )
    UpperCAmelCase_ : Optional[Any] = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        UpperCAmelCase_ : str = []
    UpperCAmelCase_ : Optional[Any] = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(A )

    # compatibility with peft
    UpperCAmelCase_ : Union[str, Any] = load_in_abit
    UpperCAmelCase_ : Any = load_in_abit

    UpperCAmelCase_ : int = get_parameter_device(A )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        UpperCAmelCase_ : str = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
        # convert param to the right dtype
        UpperCAmelCase_ : int = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    UpperCAmelCase_ : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    UpperCAmelCase_ : Tuple = getattr(A , A , A )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(A ):
                param.to(A )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            F"The model device type is {model_device.type}. However, cuda is needed for quantization."
            '''We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
    else:
        with init_empty_weights():
            UpperCAmelCase_ : Optional[int] = replace_with_bnb_layers(
                A , A , modules_to_not_convert=A )
        UpperCAmelCase_ : int = get_quantized_model_device_map(
            A ,
            A ,
            A ,
            max_memory=A ,
            no_split_module_classes=A ,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            UpperCAmelCase_ : Optional[int] = True
        UpperCAmelCase_ : Union[str, Any] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            A ,
            A ,
            A ,
            dtype=bnb_quantization_config.torch_dtype ,
            offload_folder=A ,
            offload_state_dict=A ,
            keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,
            offload_abit_bnb=load_in_abit and offload ,
        )
        return dispatch_model(A , device_map=A , offload_dir=A )


def __UpperCAmelCase ( A : Optional[int] , A : Dict , A : Any=None , A : Union[str, Any]=None , A : str=None ) -> Union[str, Any]:
    if device_map is None:
        if torch.cuda.is_available():
            UpperCAmelCase_ : Any = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )

    if isinstance(A , A ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )

        UpperCAmelCase_ : Any = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )

        UpperCAmelCase_ : Optional[int] = {}
        UpperCAmelCase_ : Optional[int] = special_dtypes
        UpperCAmelCase_ : List[Any] = no_split_module_classes
        UpperCAmelCase_ : Dict = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            UpperCAmelCase_ : List[str] = get_balanced_memory(
                A ,
                low_zero=(device_map == '''balanced_low_0''') ,
                max_memory=A ,
                **A ,
            )

        UpperCAmelCase_ : int = max_memory
        UpperCAmelCase_ : Dict = infer_auto_device_map(A , **A )

    if isinstance(A , A ):
        # check if don't have any quantized module on the cpu
        UpperCAmelCase_ : str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules

        UpperCAmelCase_ : List[Any] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map


def __UpperCAmelCase ( A : Dict , A : Any , A : Any=None , A : int=None ) -> int:
    if modules_to_not_convert is None:
        UpperCAmelCase_ : Dict = []
    UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = _replace_with_bnb_layers(
        A , A , A , A )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model


def __UpperCAmelCase ( A : int , A : Optional[int] , A : List[str]=None , A : List[str]=None , ) -> Any:
    UpperCAmelCase_ : Optional[Any] = False
    for name, module in model.named_children():
        if current_key_name is None:
            UpperCAmelCase_ : Optional[int] = []
        current_key_name.append(A )

        if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            UpperCAmelCase_ : Any = '''.'''.join(A )
            UpperCAmelCase_ : Optional[Any] = True
            for key in modules_to_not_convert:
                if ((key in current_key_name_str) and (key + "." in current_key_name_str)) or key == current_key_name_str:
                    UpperCAmelCase_ : Optional[int] = False
                    break

            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    UpperCAmelCase_ : Optional[Any] = bnb.nn.LinearabitLt(
                        module.in_features ,
                        module.out_features ,
                        module.bias is not None ,
                        has_fpaa_weights=A ,
                        threshold=bnb_quantization_config.llm_inta_threshold ,
                    )
                elif bnb_quantization_config.load_in_abit:
                    UpperCAmelCase_ : List[str] = bnb.nn.Linearabit(
                        module.in_features ,
                        module.out_features ,
                        module.bias is not None ,
                        bnb_quantization_config.bnb_abit_compute_dtype ,
                        compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,
                        quant_type=bnb_quantization_config.bnb_abit_quant_type ,
                    )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                UpperCAmelCase_ : Dict = module.weight.data
                if module.bias is not None:
                    UpperCAmelCase_ : Union[str, Any] = module.bias.data
                bnb_module.requires_grad_(A )
                setattr(A , A , A )
                UpperCAmelCase_ : List[Any] = True
        if len(list(module.children() ) ) > 0:
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = _replace_with_bnb_layers(
                A , A , A , A )
            UpperCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced


def __UpperCAmelCase ( A : Tuple ) -> List[str]:
    # Create a copy of the model
    with init_empty_weights():
        UpperCAmelCase_ : Dict = deepcopy(A )  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    UpperCAmelCase_ : str = find_tied_parameters(A )
    # For compatibility with Accelerate < 0.18
    if isinstance(A , A ):
        UpperCAmelCase_ : Optional[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        UpperCAmelCase_ : List[Any] = sum(A , [] )
    UpperCAmelCase_ : Tuple = len(A ) > 0

    # Check if it is a base model
    UpperCAmelCase_ : Optional[int] = False
    if hasattr(A , '''base_model_prefix''' ):
        UpperCAmelCase_ : Optional[Any] = not hasattr(A , model.base_model_prefix )

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    UpperCAmelCase_ : List[str] = list(model.named_children() )
    UpperCAmelCase_ : int = [list_modules[-1][0]]

    # add last module together with tied weights
    UpperCAmelCase_ : Union[str, Any] = set(A ) - set(A )
    UpperCAmelCase_ : Any = list(set(A ) ) + list(A )

    # remove ".weight" from the keys
    UpperCAmelCase_ : List[Any] = ['''.weight''', '''.bias''']
    UpperCAmelCase_ : Dict = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                UpperCAmelCase_ : Dict = name.replace(A , '''''' )
        filtered_module_names.append(A )

    return filtered_module_names


def __UpperCAmelCase ( A : int ) -> Optional[int]:
    for m in model.modules():
        if isinstance(A , bnb.nn.Linearabit ):
            return True
    return False


def __UpperCAmelCase ( A : nn.Module ) -> Optional[int]:
    return next(parameter.parameters() ).device


def __UpperCAmelCase ( A : List[str] , A : List[Any] , A : Optional[Any] , A : Dict , A : Dict , A : List[str] , A : int ) -> Dict:
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
    UpperCAmelCase_ : Dict = param_name
    UpperCAmelCase_ : Any = model
    if "." in tensor_name:
        UpperCAmelCase_ : str = tensor_name.split('''.''' )
        for split in splits[:-1]:
            UpperCAmelCase_ : Union[str, Any] = getattr(A , A )
            if new_module is None:
                raise ValueError(F"{module} has no attribute {split}." )
            UpperCAmelCase_ : Any = new_module
        UpperCAmelCase_ : Tuple = splits[-1]
    # offload weights
    UpperCAmelCase_ : List[str] = False
    offload_weight(module._parameters[tensor_name] , A , A , index=A )
    if hasattr(module._parameters[tensor_name] , '''SCB''' ):
        offload_weight(
            module._parameters[tensor_name].SCB ,
            param_name.replace('''weight''' , '''SCB''' ) ,
            A ,
            index=A ,
        )
    else:
        offload_weight(A , A , A , index=A )
        offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )

    set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
code_codestyle: 304
style_context:
'''simple docstring'''

import enum
import warnings

from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_tf_available():
    import tensorflow as tf


class snake_case__ ( enum.Enum):
    a_ = 0
    a_ = 1
    a_ = 2


@add_end_docstrings(UpperCamelCase)
class snake_case__ ( UpperCamelCase):
    a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "

    def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]:
        super().__init__(*_A , **_A )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCAmelCase_ : Dict = None
            if self.model.config.prefix is not None:
                UpperCAmelCase_ : Tuple = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params )
                UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params}
                UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params}

    def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict:
        UpperCAmelCase_ : Union[str, Any] = {}
        if prefix is not None:
            UpperCAmelCase_ : List[Any] = prefix
        if prefix:
            UpperCAmelCase_ : Tuple = self.tokenizer(
                _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
            UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    ''' [None, \'hole\']''' )
            UpperCAmelCase_ : Union[str, Any] = handle_long_generation

        preprocess_params.update(_A )
        UpperCAmelCase_ : Optional[int] = generate_kwargs

        UpperCAmelCase_ : Tuple = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
            UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
            UpperCAmelCase_ : List[Any] = ReturnType.TENSORS
        if return_type is not None:
            UpperCAmelCase_ : List[Any] = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
            if len(_A ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            UpperCAmelCase_ : str = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any:
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True} )
        return super()._parse_and_tokenize(*_A , **_A )

    def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict:
        return super().__call__(_A , **_A )

    def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]:
        UpperCAmelCase_ : Tuple = self.tokenizer(
            prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework )
        UpperCAmelCase_ : str = prompt_text

        if handle_long_generation == "hole":
            UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens''']
            else:
                UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('''We cannot infer how many new tokens are expected''' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
                        ''' models max length''' )

                UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:]

        return inputs

    def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]:
        UpperCAmelCase_ : Any = model_inputs['''input_ids''']
        UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCAmelCase_ : Any = None
            UpperCAmelCase_ : List[Any] = None
            UpperCAmelCase_ : Union[str, Any] = 1
        else:
            UpperCAmelCase_ : Optional[int] = input_ids.shape[0]
        UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' )

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 )
        if prefix_length > 0:
            UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A )
        UpperCAmelCase_ : Any = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]:
        UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0]
        UpperCAmelCase_ : int = model_outputs['''input_ids''']
        UpperCAmelCase_ : str = model_outputs['''prompt_text''']
        UpperCAmelCase_ : Any = generated_sequence.numpy().tolist()
        UpperCAmelCase_ : int = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCAmelCase_ : Any = self.tokenizer.decode(
                    _A ,
                    skip_special_tokens=_A ,
                    clean_up_tokenization_spaces=_A ,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCAmelCase_ : List[str] = 0
                else:
                    UpperCAmelCase_ : str = len(
                        self.tokenizer.decode(
                            input_ids[0] ,
                            skip_special_tokens=_A ,
                            clean_up_tokenization_spaces=_A ,
                        ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:]
                else:
                    UpperCAmelCase_ : Dict = text[prompt_length:]

                UpperCAmelCase_ : List[str] = {'''generated_text''': all_text}
            records.append(_A )

        return records
304
1
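# A self-contained sketch of the "hole" strategy the pipeline above applies to
# over-long prompts: keep only the most recent tokens so that prompt length plus
# the requested new tokens fits the model's maximum length. The function name
# and signature here are illustrative, not part of the pipeline's API.
def trim_for_generation(input_ids: list, new_tokens: int, model_max_length: int) -> list:
    cur_len = len(input_ids)
    if new_tokens < 0:
        raise ValueError('We cannot infer how many new tokens are expected')
    if cur_len + new_tokens > model_max_length:
        keep_length = model_max_length - new_tokens
        if keep_length <= 0:
            raise ValueError('The number of desired tokens exceeds the model max length')
        return input_ids[-keep_length:]  # drop the oldest tokens ("hole" at the front)
    return input_ids


assert trim_for_generation(list(range(10)), new_tokens=4, model_max_length=8) == [6, 7, 8, 9]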
'''simple docstring'''


def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order neighbours and step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
304
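# Quick property check for the gnome sort above (O(n**2) worst case, O(n) on
# already-sorted input), using Python's built-in sorted() as the oracle:
import random

_data = [random.randint(0, 99) for _ in range(50)]
assert gnome_sort(list(_data)) == sorted(_data)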
'''simple docstring'''
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]

    # alternate between the maximizing and the minimizing player at each level
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
304
1
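# Worked example for the minimax above on its sample scores (height = 3, root is
# the maximizer): the depth-2 maxima are [90, 33, 65, 34423], the depth-1 minima
# are [33, 65], so the root picks max(33, 65) = 65.
import math

_scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax(0, 0, True, _scores, math.log(len(_scores), 2)) == 65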
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def __UpperCAmelCase ( A : Optional[int] ) -> List[str]: UpperCAmelCase_ : List[str] = torch.load(A , map_location='''cpu''' ) if "model" in sd.keys(): UpperCAmelCase_ : Union[str, Any] = torch.load(A , map_location='''cpu''' )['''model'''] # pop unnecessary weights UpperCAmelCase_ : Tuple = [ '''decoder.version''', '''decoder.output_projection.weight''', ] for key in keys_to_delete: if key in sd: sd.pop(A ) UpperCAmelCase_ : str = { '''decoder.project_in_dim.weight''': '''decoder.project_in.weight''', '''decoder.project_out_dim.weight''': '''decoder.project_out.weight''', '''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: UpperCAmelCase_ : Union[str, Any] = sd.pop(A ) UpperCAmelCase_ : Dict = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: UpperCAmelCase_ : Any = sd[key] # We split QKV in separate Q,K,V UpperCAmelCase_ : Optional[int] = key.replace('''.qkv_proj.''' , '''.q_proj.''' ) UpperCAmelCase_ : List[str] = key.replace('''.qkv_proj.''' , '''.k_proj.''' ) UpperCAmelCase_ : Optional[int] = key.replace('''.qkv_proj.''' , '''.v_proj.''' ) UpperCAmelCase_ : Tuple = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = torch.split(A , depth // 3 , dim=0 ) UpperCAmelCase_ : Dict = q UpperCAmelCase_ : Optional[int] = k UpperCAmelCase_ : Optional[Any] = v del sd[key] return sd @torch.no_grad() def __UpperCAmelCase ( A : Dict , A : Union[str, Any] , A : Any=None ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_checkpoint(A ) if config is not None: UpperCAmelCase_ : Any = OPTConfig.from_pretrained(A ) else: UpperCAmelCase_ : Dict = OPTConfig() UpperCAmelCase_ : Union[str, Any] = OPTModel(A ).half().eval() model.load_state_dict(A ) # Check results Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) if __name__ == "__main__": _UpperCamelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--fairseq_path', type=str, help=( 'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:' ' https://huggingface.co/models?other=opt_metasq' ), ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.') _UpperCamelCase : List[str] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
304
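# Example invocation of the conversion script above; the script filename and the
# paths are placeholders, only the flags come from the argparse setup shown:
#
#   python convert_opt_checkpoint.py \
#       --fairseq_path /path/to/checkpoint.pt \
#       --pytorch_dump_folder_path /path/to/opt-hf \
#       --hf_config facebook/opt-350m
#
# The qkv handling splits a fused projection into equal thirds along dim 0:
import torch

_fused = torch.randn(12, 4)  # depth = 12, divisible by 3 as the script asserts
_q, _k, _v = torch.split(_fused, _fused.shape[0] // 3, dim=0)
assert _q.shape == _k.shape == _v.shape == (4, 4)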
'''simple docstring'''
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    # write the merged run back in place
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
304
1
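# Bottom-up behaviour of iter_merge_sort above: the window p doubles every pass
# (2, 4, 8, ...), so an n-element list needs about log2(n) merge passes before
# the final merge of the last two runs.
_data = [7, 3, 9, 1, 6, 2]
assert iter_merge_sort(_data) == [1, 2, 3, 6, 7, 9]
assert iter_merge_sort([]) == []  # matches the empty-input guard in __main__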
'''simple docstring''' from math import factorial class snake_case__ : def __init__( self : Optional[int] , _A : List[str] , _A : List[str] ) -> Any: UpperCAmelCase_ : str = real if isinstance(_A , _A ): UpperCAmelCase_ : Dict = [1] * rank else: UpperCAmelCase_ : Optional[Any] = rank def __repr__( self : Dict ) -> str: return ( F"{self.real}+" F"{'+'.join(str(_A )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}" ) def A ( self : int ) -> str: UpperCAmelCase_ : str = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , _A ) def __add__( self : Union[str, Any] , _A : Dict ) -> Dict: if not isinstance(_A , _A ): return Dual(self.real + other , self.duals ) UpperCAmelCase_ : Optional[Any] = self.duals.copy() UpperCAmelCase_ : Union[str, Any] = other.duals.copy() if len(_A ) > len(_A ): o_dual.extend([1] * (len(_A ) - len(_A )) ) elif len(_A ) < len(_A ): s_dual.extend([1] * (len(_A ) - len(_A )) ) UpperCAmelCase_ : str = [] for i in range(len(_A ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , _A ) a_ = __add__ def __sub__( self : int , _A : str ) -> Union[str, Any]: return self + other * -1 def __mul__( self : Optional[int] , _A : Tuple ) -> List[str]: if not isinstance(_A , _A ): UpperCAmelCase_ : Optional[Any] = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , _A ) UpperCAmelCase_ : Dict = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , _A ) a_ = __mul__ def __truediv__( self : Union[str, Any] , _A : Tuple ) -> Union[str, Any]: if not isinstance(_A , _A ): UpperCAmelCase_ : Union[str, Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , _A ) raise ValueError def __floordiv__( self : Dict , _A : Optional[Any] ) -> Dict: if not isinstance(_A , _A ): UpperCAmelCase_ : Union[str, Any] = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , _A ) raise ValueError def __pow__( self : Tuple , _A : Tuple ) -> Tuple: if n < 0 or isinstance(_A , _A ): raise ValueError('''power must be a positive integer''' ) if n == 0: return 1 if n == 1: return self UpperCAmelCase_ : Union[str, Any] = self for _ in range(n - 1 ): x *= self return x def __UpperCAmelCase ( A : Tuple , A : Any , A : Union[str, Any] ) -> Tuple: if not callable(A ): raise ValueError('''differentiate() requires a function as input for func''' ) if not isinstance(A , (float, int) ): raise ValueError('''differentiate() requires a float as input for position''' ) if not isinstance(A , A ): raise ValueError('''differentiate() requires an int as input for order''' ) UpperCAmelCase_ : Any = Dual(A , 1 ) UpperCAmelCase_ : Any = func(A ) if order == 0: return result.real return result.duals[order - 1] * factorial(A ) if __name__ == "__main__": import doctest doctest.testmod() def __UpperCAmelCase ( A : Union[str, Any] ) -> List[Any]: return y**2 * y**4 print(differentiate(f, 9, 2))
304
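# Worked check of differentiate() above: f(y) = y**2 * y**4 = y**6, so
# f''(y) = 30 * y**4 and differentiate(f, 9, 2) should print 30 * 9**4 = 196830.
# Below is a minimal, self-contained first-order version of the same
# forward-mode idea, independent of the class above:
class _D:
    def __init__(self, v: float, d: float = 0.0) -> None:
        self.v, self.d = v, d

    def __mul__(self, other: "_D") -> "_D":
        # product rule: (uv)' = u'v + uv'
        return _D(self.v * other.v, self.v * other.d + self.d * other.v)


_x = _D(9.0, 1.0)  # seed dx/dx = 1
_y = _x * _x * _x * _x * _x * _x  # y = x**6
assert abs(_y.d - 6 * 9.0**5) < 1e-6  # f'(9) = 6 * 9**5 = 354294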
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case__ : a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 a_ = 42 a_ = 42 a_ = 42 a_ = 42 def A ( self : Tuple ) -> Optional[int]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A ( self : List[Any] ) -> Union[str, Any]: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A ( self : Optional[int] ) -> torch.Tensor: UpperCAmelCase_ : Dict = torch.arange(self.height * self.width ) UpperCAmelCase_ : int = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) ) UpperCAmelCase_ : Any = self.get_image_coords() UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A ) UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor: UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 ) UpperCAmelCase_ : Union[str, Any] = self.resolution() UpperCAmelCase_ : int = self.fov() UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 ) UpperCAmelCase_ : List[Any] = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A ) UpperCAmelCase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera: UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : Optional[int] = -z * 4 UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(A , A ) origins.append(A ) xs.append(A ) ys.append(A ) zs.append(A ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
304
1
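# A numpy sketch of the pinhole-ray math in get_camera_rays above: pixel
# coordinates map to [-1, 1], are scaled by tan(fov / 2), and are combined with
# the camera basis vectors (x, y, z); the direction is then normalized.
import numpy as np


def _camera_ray(px, py, width, height, x_fov, y_fov, x, y, z):
    fracs = np.array([px / (width - 1), py / (height - 1)]) * 2 - 1
    fracs = fracs * np.tan(np.array([x_fov, y_fov]) / 2)
    direction = z + x * fracs[0] + y * fracs[1]
    return direction / np.linalg.norm(direction)


# the centre pixel of a 64x64 view looks straight down the camera z axis
_d = _camera_ray(31.5, 31.5, 64, 64, 0.7, 0.7, np.eye(3)[0], np.eye(3)[1], np.eye(3)[2])
assert np.allclose(_d, [0.0, 0.0, 1.0])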
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __UpperCAmelCase ( A : str , A : Optional[int]=1_0 ) -> int: UpperCAmelCase_ : List[str] = [] for _ in range(A ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[Any]=1_0 ) -> Dict: UpperCAmelCase_ : Optional[int] = [] for step in range(A ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Union[str, Any] = os.path.join(A , '''schedule.bin''' ) torch.save(scheduler.state_dict() , A ) UpperCAmelCase_ : Union[str, Any] = torch.load(A ) scheduler.load_state_dict(A ) return lrs @require_torch class snake_case__ ( unittest.TestCase): def A ( self : Any , _A : Optional[int] , _A : Any , _A : List[str] ) -> Union[str, Any]: self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A ) def A ( self : Tuple ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A ) UpperCAmelCase_ : Tuple = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase_ : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase_ : Any = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): UpperCAmelCase_ : Dict = criterion(_A , _A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def A ( self : Dict ) -> Optional[int]: UpperCAmelCase_ : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_A ) UpperCAmelCase_ : int = torch.tensor([0.4, 0.2, -0.5] ) UpperCAmelCase_ : str = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCAmelCase_ : Any = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_A , weight_decay=0.0 , relative_step=_A , scale_parameter=_A , warmup_init=_A , ) for _ in range(10_00 ): UpperCAmelCase_ : List[Any] = criterion(_A , _A ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class snake_case__ ( unittest.TestCase): a_ = nn.Linear(50 , 50) if is_torch_available() else None a_ = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None a_ = 10 def A ( self : Any , _A : Dict , _A : Dict , _A : Any , _A : int=None ) -> List[Any]: self.assertEqual(len(_A ) , len(_A ) ) for a, b in zip(_A , _A ): self.assertAlmostEqual(_A , _A , delta=_A , msg=_A ) def A ( self : int ) -> int: UpperCAmelCase_ : Union[str, Any] = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCAmelCase_ : List[str] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCAmelCase_ , UpperCAmelCase_ : Any = data UpperCAmelCase_ : List[str] = scheduler_func(self.optimizer , **_A ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCAmelCase_ : Optional[int] = unwrap_schedule(_A , self.num_steps ) self.assertListAlmostEqual( _A , _A , tol=1e-2 , msg=F"failed for {scheduler_func} in normal scheduler" , ) UpperCAmelCase_ : int = scheduler_func(self.optimizer , **_A ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_A ) # wrap to test picklability of the schedule UpperCAmelCase_ : List[str] = unwrap_and_save_reload_schedule(_A , self.num_steps ) self.assertListEqual(_A , _A , msg=F"failed for {scheduler_func} in save and reload" ) class snake_case__ : def __init__( self : Optional[Any] , _A : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = fn def __call__( self : Tuple , *_A : Optional[Any] , **_A : List[str] ) -> List[Any]: return self.fn(*_A , **_A ) @classmethod def A ( self : Optional[Any] , _A : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = list(map(self , scheduler.lr_lambdas ) )
304
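# A minimal sketch of the warmup schedule the tests above pin down: AdamW at
# lr=10.0 with num_warmup_steps=2 and num_training_steps=10 should yield
# [0.0, 5.0, 10.0, 8.75, 7.5, ...] when read the same way as unwrap_schedule:
import torch
from transformers import get_linear_schedule_with_warmup

_model = torch.nn.Linear(50, 50)
_opt = torch.optim.AdamW(_model.parameters(), lr=10.0)
_sched = get_linear_schedule_with_warmup(_opt, num_warmup_steps=2, num_training_steps=10)
_lrs = []
for _ in range(10):
    _lrs.append(_sched.get_lr()[0])
    _sched.step()
assert abs(_lrs[1] - 5.0) < 1e-6 and abs(_lrs[2] - 10.0) < 1e-6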
'''simple docstring'''
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        # pair every character code with a fresh random key k and emit (p + k) * k
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        # invert (p + k) * k = p*k + k**2 to recover p = (c - k**2) / k
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
304
1
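# Round-trip property of the Onepad cipher above: encrypt computes
# c = (p + k) * k = p*k + k**2, so decrypt recovers p = (c - k**2) / k exactly.
_c, _k = Onepad().encrypt('attack at dawn')
assert Onepad().decrypt(_c, _k) == 'attack at dawn'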
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
304
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = ReformerTokenizer a_ = ReformerTokenizerFast a_ = True a_ = False a_ = True def A ( self : Optional[Any] ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[Any] = '''<s>''' UpperCAmelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 10_00 ) def A ( self : Optional[int] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def A ( self : Optional[Any] ) -> List[Any]: if not self.test_rust_tokenizer: return UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(_A ) UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def A ( self : Tuple , _A : Dict=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase_ : Optional[int] = '''This is a simple input''' UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase_ : Dict = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , 
) def A ( self : Union[str, Any] ) -> int: pass def A ( self : int ) -> Any: UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A ) UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : List[str] ) -> Optional[int]: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : Tuple = '''Hello World!''' UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def A ( self : List[Any] ) -> str: UpperCAmelCase_ : Tuple = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase_ : int = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @require_torch @slow def A ( self : List[str] ) -> Optional[int]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ : List[Any] = ''' '''.join(_A ) UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' ) UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) UpperCAmelCase_ : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape UpperCAmelCase_ : Optional[int] = ReformerModel(_A ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_A ) model(**_A ) @slow def A ( self : int ) -> Optional[Any]: # fmt: off UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 UpperCAmelCase_ : Optional[Any] = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
304
1
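# The slow integration expectation above can be reproduced directly (requires
# network access and the sentencepiece package):
from transformers import ReformerTokenizer

_tok = ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment')
assert _tok.encode('Hello World!') == [126, 32, 262, 152, 38, 72, 287]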
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __UpperCAmelCase ( A : str , A : List[Any]=0.999 , A : Optional[int]="cosine" , ) -> Union[str, Any]: if alpha_transform_type == "cosine": def alpha_bar_fn(A : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A : Tuple ): return math.exp(t * -12.0 ) else: raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" ) UpperCAmelCase_ : List[str] = [] for i in range(A ): UpperCAmelCase_ : Optional[Any] = i / num_diffusion_timesteps UpperCAmelCase_ : Dict = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A ) / alpha_bar_fn(A ) , A ) ) return torch.tensor(A , dtype=torch.floataa ) class snake_case__ ( UpperCamelCase , UpperCamelCase): a_ = [e.name for e in KarrasDiffusionSchedulers] a_ = 2 @register_to_config def __init__( self : Optional[Any] , _A : int = 10_00 , _A : float = 0.00_085 , _A : float = 0.012 , _A : str = "linear" , _A : Optional[Union[np.ndarray, List[float]]] = None , _A : str = "epsilon" , _A : Optional[bool] = False , _A : Optional[bool] = False , _A : float = 1.0 , _A : str = "linspace" , _A : int = 0 , ) -> Dict: if trained_betas is not None: UpperCAmelCase_ : Any = torch.tensor(_A , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ : str = torch.linspace(_A , _A , _A , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ : Tuple = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _A , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ : List[str] = betas_for_alpha_bar(_A , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": UpperCAmelCase_ : List[Any] = betas_for_alpha_bar(_A , alpha_transform_type='''exp''' ) else: raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}" ) UpperCAmelCase_ : int = 1.0 - self.betas UpperCAmelCase_ : Dict = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_A , _A , _A ) UpperCAmelCase_ : Tuple = use_karras_sigmas def A ( self : Any , _A : Any , _A : Any=None ) -> Union[str, Any]: if schedule_timesteps is None: UpperCAmelCase_ : Any = self.timesteps UpperCAmelCase_ : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: UpperCAmelCase_ : Union[str, Any] = 1 if len(_A ) > 1 else 0 else: UpperCAmelCase_ : int = timestep.cpu().item() if torch.is_tensor(_A ) else timestep UpperCAmelCase_ : Optional[int] = self._index_counter[timestep_int] return indices[pos].item() @property def A ( self : Optional[int] ) -> Tuple: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def A ( self : Any , _A : torch.FloatTensor , _A : Union[float, torch.FloatTensor] , ) -> torch.FloatTensor: UpperCAmelCase_ : Any = self.index_for_timestep(_A ) UpperCAmelCase_ : int = self.sigmas[step_index] UpperCAmelCase_ : List[Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def A ( self : Union[str, Any] , _A : int , _A : Union[str, torch.device] = None , _A : Optional[int] = None , ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = num_inference_steps UpperCAmelCase_ : int = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": UpperCAmelCase_ : Optional[Any] = np.linspace(0 , num_train_timesteps - 1 , _A , dtype=_A )[::-1].copy() elif self.config.timestep_spacing == "leading": UpperCAmelCase_ : List[str] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ : Tuple = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(_A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": UpperCAmelCase_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ : Tuple = (np.arange(_A , 0 , -step_ratio )).round().copy().astype(_A ) timesteps -= 1 else: raise ValueError( F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
) UpperCAmelCase_ : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) UpperCAmelCase_ : Union[str, Any] = np.log(_A ) UpperCAmelCase_ : List[str] = np.interp(_A , np.arange(0 , len(_A ) ) , _A ) if self.config.use_karras_sigmas: UpperCAmelCase_ : int = self._convert_to_karras(in_sigmas=_A , num_inference_steps=self.num_inference_steps ) UpperCAmelCase_ : Optional[Any] = np.array([self._sigma_to_t(_A , _A ) for sigma in sigmas] ) UpperCAmelCase_ : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) UpperCAmelCase_ : str = torch.from_numpy(_A ).to(device=_A ) UpperCAmelCase_ : Dict = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) UpperCAmelCase_ : Union[str, Any] = torch.from_numpy(_A ) UpperCAmelCase_ : Tuple = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(_A ).startswith('''mps''' ): # mps does not support float64 UpperCAmelCase_ : List[Any] = timesteps.to(_A , dtype=torch.floataa ) else: UpperCAmelCase_ : str = timesteps.to(device=_A ) # empty dt and derivative UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Dict = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter UpperCAmelCase_ : Optional[Any] = defaultdict(_A ) def A ( self : Dict , _A : Optional[Any] , _A : Tuple ) -> Union[str, Any]: # get log sigma UpperCAmelCase_ : str = np.log(_A ) # get distribution UpperCAmelCase_ : List[str] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range UpperCAmelCase_ : List[str] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) UpperCAmelCase_ : Optional[int] = low_idx + 1 UpperCAmelCase_ : Tuple = log_sigmas[low_idx] UpperCAmelCase_ : str = log_sigmas[high_idx] # interpolate sigmas UpperCAmelCase_ : Union[str, Any] = (low - log_sigma) / (low - high) UpperCAmelCase_ : Optional[Any] = np.clip(_A , 0 , 1 ) # transform interpolation to time range UpperCAmelCase_ : List[str] = (1 - w) * low_idx + w * high_idx UpperCAmelCase_ : Tuple = t.reshape(sigma.shape ) return t def A ( self : Dict , _A : torch.FloatTensor , _A : int ) -> torch.FloatTensor: UpperCAmelCase_ : float = in_sigmas[-1].item() UpperCAmelCase_ : float = in_sigmas[0].item() UpperCAmelCase_ : int = 7.0 # 7.0 is the value used in the paper UpperCAmelCase_ : List[Any] = np.linspace(0 , 1 , _A ) UpperCAmelCase_ : int = sigma_min ** (1 / rho) UpperCAmelCase_ : Dict = sigma_max ** (1 / rho) UpperCAmelCase_ : int = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def A ( self : Any ) -> Tuple: return self.dt is None def A ( self : str , _A : Union[torch.FloatTensor, np.ndarray] , _A : Union[float, torch.FloatTensor] , _A : Union[torch.FloatTensor, np.ndarray] , _A : bool = True , ) -> Union[SchedulerOutput, Tuple]: UpperCAmelCase_ : int = self.index_for_timestep(_A ) # advance index counter by 1 UpperCAmelCase_ : Dict = timestep.cpu().item() if torch.is_tensor(_A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: UpperCAmelCase_ : Tuple = self.sigmas[step_index] UpperCAmelCase_ : Tuple = self.sigmas[step_index + 1] else: # 2nd order / Heun's method UpperCAmelCase_ : Dict = self.sigmas[step_index - 1] UpperCAmelCase_ : Optional[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : Optional[int] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": UpperCAmelCase_ : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ : List[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_next UpperCAmelCase_ : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": UpperCAmelCase_ : str = model_output else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" ) if self.config.clip_sample: UpperCAmelCase_ : int = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order UpperCAmelCase_ : List[Any] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep UpperCAmelCase_ : Optional[int] = sigma_next - sigma_hat # store for 2nd order step UpperCAmelCase_ : Tuple = derivative UpperCAmelCase_ : int = dt UpperCAmelCase_ : Any = sample else: # 2. 2nd order / Heun's method UpperCAmelCase_ : List[str] = (sample - pred_original_sample) / sigma_next UpperCAmelCase_ : int = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample UpperCAmelCase_ : Dict = self.dt UpperCAmelCase_ : Tuple = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Any = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_A ) def A ( self : Optional[int] , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples UpperCAmelCase_ : Optional[Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_A ): # mps does not support float64 UpperCAmelCase_ : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) UpperCAmelCase_ : int = timesteps.to(original_samples.device , dtype=torch.floataa ) else: UpperCAmelCase_ : Tuple = self.timesteps.to(original_samples.device ) UpperCAmelCase_ : List[Any] = timesteps.to(original_samples.device ) UpperCAmelCase_ : Any = [self.index_for_timestep(_A , _A ) for t in timesteps] UpperCAmelCase_ : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): UpperCAmelCase_ : List[str] = sigma.unsqueeze(-1 ) UpperCAmelCase_ : str = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any] ) -> Union[str, Any]: return self.config.num_train_timesteps
304
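# The Karras schedule computed by _convert_to_karras above, written out:
# sigma(t) = (sigma_max**(1/rho) + t * (sigma_min**(1/rho) - sigma_max**(1/rho)))**rho
# with rho = 7 and t ramping linearly from 0 to 1, so sigmas sweep from
# sigma_max down to sigma_min with most steps spent at small sigma:
import numpy as np


def _karras_sigmas(sigma_min, sigma_max, n, rho=7.0):
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho


_s = _karras_sigmas(0.1, 10.0, 5)
assert np.isclose(_s[0], 10.0) and np.isclose(_s[-1], 0.1)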
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    # map 'a'..'z' onto 1..26
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # map 1..26 back onto 'a'..'z'
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('-> ').strip().lower())
    print('Encoded: ', encoded)
    print('Decoded:', decode(encoded))


if __name__ == "__main__":
    main()
304
1
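# Quick check of the a1z26-style mapping above ('a' -> 1, ..., 'z' -> 26):
assert encode('hello') == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == 'hello'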
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : str = '▁' _UpperCamelCase : Union[str, Any] = { 'vocab_file': 'vocab.json', 'spm_file': 'sentencepiece.bpe.model', 'tokenizer_config_file': 'tokenizer_config.json', } _UpperCamelCase : Tuple = { 'vocab_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json', }, 'spm_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_config_file': { 'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json', 'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json', }, } _UpperCamelCase : List[str] = { 'facebook/m2m100_418M': 1_024, } # fmt: off _UpperCamelCase : Union[str, Any] = { 'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'], 'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de'] } class snake_case__ ( UpperCamelCase): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = ["input_ids", "attention_mask"] a_ = [] a_ = [] def __init__( self : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Dict=None , _A : List[str]=None , _A : Union[str, Any]="<s>" , _A : Union[str, Any]="</s>" , _A : Any="</s>" , _A : Dict="<pad>" , _A : Tuple="<unk>" , _A : Union[str, Any]="m2m100" , _A : Optional[Dict[str, Any]] = None , _A : Tuple=8 , **_A : int , ) -> None: UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase_ : Optional[int] = language_codes UpperCAmelCase_ : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes] UpperCAmelCase_ : int = {lang_code: F"__{lang_code}__" for lang_code in fairseq_language_code} UpperCAmelCase_ : Union[str, Any] = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_A ) for lang_code in fairseq_language_code if self.get_lang_token(_A ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_A , tgt_lang=_A , bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , language_codes=_A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_A , **_A , ) UpperCAmelCase_ : Any = vocab_file UpperCAmelCase_ : Tuple = load_json(_A ) UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Any = spm_file UpperCAmelCase_ : int = load_spm(_A , 
self.sp_model_kwargs ) UpperCAmelCase_ : Optional[int] = len(self.encoder ) UpperCAmelCase_ : Tuple = { self.get_lang_token(_A ): self.encoder_size + i for i, lang_code in enumerate(_A ) } UpperCAmelCase_ : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_A )} UpperCAmelCase_ : List[str] = {v: k for k, v in self.lang_token_to_id.items()} UpperCAmelCase_ : Optional[int] = src_lang if src_lang is not None else '''en''' UpperCAmelCase_ : Optional[int] = tgt_lang UpperCAmelCase_ : Optional[int] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) UpperCAmelCase_ : Optional[Any] = num_madeup_words @property def A ( self : Optional[int] ) -> int: return len(self.encoder ) + len(self.lang_token_to_id ) @property def A ( self : int ) -> str: return self._src_lang @src_lang.setter def A ( self : Optional[Any] , _A : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def A ( self : Optional[Any] , _A : str ) -> List[str]: return self.sp_model.encode(_A , out_type=_A ) def A ( self : int , _A : int ) -> List[Any]: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_A , self.encoder[self.unk_token] ) def A ( self : str , _A : int ) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_A , self.unk_token ) def A ( self : Union[str, Any] , _A : Tuple ) -> Tuple: UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Tuple = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A ) + token UpperCAmelCase_ : List[Any] = [] else: current_sub_tokens.append(_A ) out_string += self.sp_model.decode(_A ) return out_string.strip() def A ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) UpperCAmelCase_ : Optional[Any] = [1] * len(self.prefix_tokens ) UpperCAmelCase_ : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_A )) + suffix_ones return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones def A ( self : str , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def A ( self : List[str] ) -> Dict: UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.__dict__.copy() UpperCAmelCase_ : Union[str, Any] = None return state def __setstate__( self : int , _A : Dict ) -> None: UpperCAmelCase_ : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : Dict = {} UpperCAmelCase_ : Any = load_spm(self.spm_file , self.sp_model_kwargs ) def A ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: UpperCAmelCase_ : Optional[int] = Path(_A ) if not save_dir.is_dir(): raise OSError(F"{save_directory} should be a directory" ) UpperCAmelCase_ : Dict = 
save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) UpperCAmelCase_ : int = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , _A ) if os.path.abspath(self.spm_file ) != os.path.abspath(_A ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _A ) elif not os.path.isfile(self.spm_file ): with open(_A , '''wb''' ) as fi: UpperCAmelCase_ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (str(_A ), str(_A )) def A ( self : int , _A : List[str] , _A : str = "en" , _A : Optional[List[str]] = None , _A : str = "ro" , **_A : Optional[Any] , ) -> BatchEncoding: UpperCAmelCase_ : int = src_lang UpperCAmelCase_ : Dict = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_A , _A , **_A ) def A ( self : Tuple , _A : Optional[int] , _A : Optional[str] , _A : Optional[str] , **_A : Union[str, Any] ) -> Optional[Any]: if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase_ : Optional[Any] = src_lang UpperCAmelCase_ : List[Any] = self(_A , add_special_tokens=_A , **_A ) UpperCAmelCase_ : Any = self.get_lang_id(_A ) UpperCAmelCase_ : Any = tgt_lang_id return inputs def A ( self : Optional[int] ) -> str: self.set_src_lang_special_tokens(self.src_lang ) def A ( self : str ) -> Union[str, Any]: self.set_tgt_lang_special_tokens(self.tgt_lang ) def A ( self : Optional[int] , _A : str ) -> None: UpperCAmelCase_ : int = self.get_lang_token(_A ) UpperCAmelCase_ : str = self.lang_token_to_id[lang_token] UpperCAmelCase_ : Dict = [self.cur_lang_id] UpperCAmelCase_ : Optional[int] = [self.eos_token_id] def A ( self : List[str] , _A : str ) -> None: UpperCAmelCase_ : Optional[int] = self.get_lang_token(_A ) UpperCAmelCase_ : Any = self.lang_token_to_id[lang_token] UpperCAmelCase_ : List[Any] = [self.cur_lang_id] UpperCAmelCase_ : List[Any] = [self.eos_token_id] def A ( self : Optional[Any] , _A : str ) -> str: return self.lang_code_to_token[lang] def A ( self : Union[str, Any] , _A : str ) -> int: UpperCAmelCase_ : Optional[Any] = self.get_lang_token(_A ) return self.lang_token_to_id[lang_token] def __UpperCAmelCase ( A : str , A : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: UpperCAmelCase_ : Optional[Any] = sentencepiece.SentencePieceProcessor(**A ) spm.Load(str(A ) ) return spm def __UpperCAmelCase ( A : str ) -> Union[Dict, List]: with open(A , '''r''' ) as f: return json.load(A ) def __UpperCAmelCase ( A : List[str] , A : str ) -> None: with open(A , '''w''' ) as f: json.dump(A , A , indent=2 )
304
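# An end-to-end usage sketch for the tokenizer above (requires network access;
# get_lang_id is defined by the class, and forced_bos_token_id is how M2M100
# selects the target language at generation time):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

_tok = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en')
_model = M2M100ForConditionalGeneration.from_pretrained('facebook/m2m100_418M')
_batch = _tok('Hello world', return_tensors='pt')
_out = _model.generate(**_batch, forced_bos_token_id=_tok.get_lang_id('fr'))
print(_tok.batch_decode(_out, skip_special_tokens=True))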
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
304
1
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase : int = logging.get_logger(__name__) _UpperCamelCase : Tuple = {'vocab_file': 'spiece.model'} _UpperCamelCase : Any = { 'vocab_file': { 'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model', 'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model', } } _UpperCamelCase : str = { 'AI-Sweden/gpt-sw3-126m': 2_048, 'AI-Sweden/gpt-sw3-350m': 2_048, 'AI-Sweden/gpt-sw3-1.6b': 2_048, 'AI-Sweden/gpt-sw3-6.7b': 2_048, 'AI-Sweden/gpt-sw3-20b': 2_048, } class snake_case__ ( UpperCamelCase): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _A : Optional[Any] , _A : Tuple=False , _A : Optional[int]=False , _A : Union[str, Any]=False , _A : str=None , _A : str=None , _A : str=None , _A : int=None , _A : Optional[Dict[str, Any]] = None , **_A : Optional[int] , ) -> None: UpperCAmelCase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs UpperCAmelCase_ : Dict = kwargs.get('''name_or_path''' ) if name_or_path is None: logger.warning( '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,''' ''' you are testing the model, this can safely be ignored''' ) UpperCAmelCase_ : str = '''None''' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing UpperCAmelCase_ : Union[str, Any] = '''<|endoftext|>''' if eos_token is None else eos_token UpperCAmelCase_ : Union[str, Any] = '''<unk>''' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: UpperCAmelCase_ : Optional[int] = unk_token if pad_token is None else pad_token UpperCAmelCase_ : Dict = eos_token if bos_token is None else bos_token else: UpperCAmelCase_ : Any = '''<pad>''' if pad_token is None else pad_token UpperCAmelCase_ : Any = '''<s>''' if bos_token is None else bos_token super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) UpperCAmelCase_ : int = do_lower_case UpperCAmelCase_ : Union[str, Any] = remove_space UpperCAmelCase_ : Optional[Any] = keep_accents UpperCAmelCase_ : Dict = vocab_file UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) # Used for whitespace normalization in input texts # fmt : off UpperCAmelCase_ : int = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing UpperCAmelCase_ : Dict = re.compile( F"[{''.join(map(_A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(1_27 , 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]" ) def __getstate__( self : Any ) -> int: UpperCAmelCase_ : Optional[int] = self.__dict__.copy() UpperCAmelCase_ : List[str] = None return state def __setstate__( self : str , _A : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : Optional[int] = {} UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def A ( self : List[str] ) -> int: return len(self.sp_model ) def A ( self : Dict , _A : str ) -> str: UpperCAmelCase_ : List[str] = self.non_printing_characters_re.sub('''''' , _A ) # Normalize whitespaces UpperCAmelCase_ : Optional[Any] = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] ) # NFC Unicode normalization UpperCAmelCase_ : List[str] = unicodedata.normalize('''NFC''' , _A ) return text def A ( self : Optional[Any] , _A : str , **_A : Any ) -> List[str]: UpperCAmelCase_ : List[str] = self.preprocess_text(_A ) return self.sp_model.encode(_A , out_type=_A ) def A ( self : Dict , _A : str ) -> int: return self.sp_model.PieceToId(_A ) def A ( self : Tuple , _A : int ) -> str: return self.sp_model.IdToPiece(_A ) @staticmethod def A ( _A : str ) -> str: return out_string def A ( self : Dict , _A : List[str] ) -> str: UpperCAmelCase_ : int = [] UpperCAmelCase_ : Optional[Any] = '''''' UpperCAmelCase_ : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_A ) + token UpperCAmelCase_ : int = True UpperCAmelCase_ : Optional[Any] = [] else: current_sub_tokens.append(_A ) UpperCAmelCase_ : Any = False out_string += self.sp_model.decode(_A ) return out_string def A ( self : str ) -> Dict[str, int]: UpperCAmelCase_ : str = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(_A ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return UpperCAmelCase_ : Optional[int] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: UpperCAmelCase_ : Dict = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def A ( self : Any , _A : Union[str, List[str]] , _A : Union[str, bool] = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(_A , _A ): UpperCAmelCase_ : int = self.preprocess_text(_A ) UpperCAmelCase_ : str = self.sp_model.encode(_A ) else: UpperCAmelCase_ : List[Any] = [self.preprocess_text(_A ) for t in text] UpperCAmelCase_ : Union[str, Any] = self.sp_model.encode(_A ) if return_tensors is True or return_tensors 
== "pt": UpperCAmelCase_ : Optional[int] = torch.tensor(_A ) return token_ids def A ( self : Dict , _A : Union[int, List[int]] ) -> str: return self.sp_model.decode(_A ) def A ( self : Dict , _A : "Conversation" ) -> List[int]: UpperCAmelCase_ : str = [F"User: {text}" if is_user else F"Bot: {text}" for is_user, text in conversation.iter_texts()] UpperCAmelCase_ : Optional[int] = ( F"{self.eos_token}{self.bos_token}" + F"{self.bos_token}".join(_A ) + F"{self.bos_token}Bot:" ) return self.encode(text=_A )
304
'''simple docstring''' def __UpperCAmelCase ( A : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence UpperCAmelCase_ : int = gray_code_sequence_string(A ) # # convert them to integers for i in range(len(A ) ): UpperCAmelCase_ : List[str] = int(sequence[i] , 2 ) return sequence def __UpperCAmelCase ( A : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] UpperCAmelCase_ : Tuple = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits UpperCAmelCase_ : List[str] = gray_code_sequence_string(bit_count - 1 ) UpperCAmelCase_ : int = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): UpperCAmelCase_ : Union[str, Any] = '''0''' + smaller_sequence[i] sequence.append(A ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): UpperCAmelCase_ : Dict = '''1''' + smaller_sequence[i] sequence.append(A ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
304
1
'''simple docstring''' from __future__ import annotations from collections.abc import Callable def __UpperCAmelCase ( A : Callable[[int | float], int | float] , A : int | float , A : int | float , A : int = 1_0_0 , ) -> float: UpperCAmelCase_ : Tuple = x_start UpperCAmelCase_ : Any = fnc(A ) UpperCAmelCase_ : Tuple = 0.0 for _ in range(A ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase_ : int = (x_end - x_start) / steps + xa UpperCAmelCase_ : Optional[Any] = fnc(A ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase_ : Optional[int] = xa UpperCAmelCase_ : Union[str, Any] = fxa return area if __name__ == "__main__": def __UpperCAmelCase ( A : Any ) -> List[Any]: return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') _UpperCamelCase : Any = 10 while i <= 100_000: print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
304
'''simple docstring''' import logging from transformers.configuration_utils import PretrainedConfig _UpperCamelCase : Any = logging.getLogger(__name__) class snake_case__ ( UpperCamelCase): a_ = "masked_bert" def __init__( self : str , _A : Dict=3_05_22 , _A : Dict=7_68 , _A : Union[str, Any]=12 , _A : str=12 , _A : str=30_72 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Any=5_12 , _A : Union[str, Any]=2 , _A : Union[str, Any]=0.02 , _A : int=1e-12 , _A : Any=0 , _A : Any="topK" , _A : List[str]="constant" , _A : Dict=0.0 , **_A : int , ) -> Union[str, Any]: super().__init__(pad_token_id=_A , **_A ) UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : Optional[int] = pruning_method UpperCAmelCase_ : Optional[int] = mask_init UpperCAmelCase_ : List[Any] = mask_scale
304
1
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case__ : def __init__( self : Optional[int] , _A : List[Any] , _A : Optional[Any]=13 , _A : Dict=7 , _A : str=True , _A : Union[str, Any]=True , _A : Any=True , _A : List[str]=True , _A : int=True , _A : Dict=False , _A : int=False , _A : List[Any]=False , _A : Union[str, Any]=2 , _A : Optional[int]=99 , _A : Dict=0 , _A : Any=32 , _A : Optional[Any]=5 , _A : List[str]=4 , _A : Optional[int]=0.1 , _A : List[Any]=0.1 , _A : Union[str, Any]=5_12 , _A : Tuple=2 , _A : Dict=0.02 , _A : str=2 , _A : List[str]=4 , _A : int="last" , _A : Any=True , _A : List[Any]=None , _A : int=0 , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Any = batch_size UpperCAmelCase_ : Optional[int] = seq_length UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Any = use_input_lengths UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Optional[int] = use_labels UpperCAmelCase_ : List[Any] = gelu_activation UpperCAmelCase_ : int = sinusoidal_embeddings UpperCAmelCase_ : int = causal UpperCAmelCase_ : str = asm UpperCAmelCase_ : Optional[Any] = n_langs UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : str = n_special UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Optional[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Optional[int] = num_labels UpperCAmelCase_ : str = num_choices UpperCAmelCase_ : Union[str, Any] = summary_type UpperCAmelCase_ : Any = use_proj UpperCAmelCase_ : str = scope UpperCAmelCase_ : Optional[Any] = bos_token_id def A ( self : Any ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Tuple = None if self.use_input_lengths: UpperCAmelCase_ : Tuple = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length UpperCAmelCase_ : List[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Dict = None if self.use_labels: UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , 2 ).float() UpperCAmelCase_ : List[Any] = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Optional[int] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def A ( self : str ) -> int: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def A ( self : List[str] , _A : List[str] , _A : Optional[int] , _A : str , _A : int , _A : Dict , _A : List[Any] , _A : List[str] , _A : int , _A : int , ) -> List[str]: UpperCAmelCase_ : List[Any] = XLMModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase_ : str = model(_A , lengths=_A , langs=_A ) UpperCAmelCase_ : List[str] = model(_A , langs=_A ) UpperCAmelCase_ : Optional[Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Any , _A : Any , _A : List[Any] , _A : List[Any] , _A : str , _A : Any , _A : int , _A : Dict , _A : Tuple , _A : Tuple , ) -> Tuple: UpperCAmelCase_ : Optional[int] = XLMWithLMHeadModel(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Optional[Any] = model(_A , token_type_ids=_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Dict , _A : Optional[int] , _A : Optional[int] , _A : int , _A : List[Any] , _A : Union[str, Any] , _A : str , _A : Dict , _A : Optional[int] , _A : str , ) -> Optional[Any]: UpperCAmelCase_ : int = XLMForQuestionAnsweringSimple(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(_A ) UpperCAmelCase_ : Optional[Any] = model(_A , start_positions=_A , end_positions=_A ) UpperCAmelCase_ : Tuple = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : int , _A : List[Any] , _A : str , _A : List[Any] , _A : Optional[int] , _A : int , _A : Optional[int] , _A : Dict , _A : int , _A : int , ) -> List[str]: UpperCAmelCase_ : Optional[int] = XLMForQuestionAnswering(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : int = model(_A ) UpperCAmelCase_ : str = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , ) UpperCAmelCase_ : int = model( _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , ) ((UpperCAmelCase_) , ) : int = result_with_labels.to_tuple() UpperCAmelCase_ : str = model(_A , start_positions=_A , end_positions=_A ) ((UpperCAmelCase_) , ) : Union[str, Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( 
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def A ( self : Any , _A : Union[str, Any] , _A : List[str] , _A : int , _A : Optional[int] , _A : Dict , _A : int , _A : Optional[Any] , _A : List[Any] , _A : Optional[Any] , ) -> str: UpperCAmelCase_ : Tuple = XLMForSequenceClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Dict = model(_A ) UpperCAmelCase_ : Any = model(_A , labels=_A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def A ( self : List[Any] , _A : Optional[Any] , _A : List[str] , _A : Tuple , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Tuple , _A : List[Any] , _A : Optional[int] , ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : List[str] = XLMForTokenClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Optional[Any] = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : int , _A : int , _A : Tuple , _A : List[Any] , _A : Union[str, Any] , _A : Optional[int] , _A : List[Any] , _A : Any , _A : List[str] , _A : Optional[int] , ) -> Any: UpperCAmelCase_ : List[Any] = self.num_choices UpperCAmelCase_ : List[str] = XLMForMultipleChoice(config=_A ) model.to(_A ) model.eval() UpperCAmelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ : List[Any] = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : int ) -> Optional[Any]: UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class snake_case__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) a_ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable a_ = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def A ( self : Union[str, Any] , _A : int , _A : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Any: if ( 
pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def A ( self : Union[str, Any] , _A : Dict , _A : Optional[int] , _A : str=False ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": UpperCAmelCase_ : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) UpperCAmelCase_ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def A ( self : Optional[int] ) -> int: UpperCAmelCase_ : Union[str, Any] = XLMModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self , config_class=_A , emb_dim=37 ) def A ( self : int ) -> List[str]: self.config_tester.run_common_tests() def A ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*_A ) def A ( self : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*_A ) def A ( self : Any ) -> List[str]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*_A ) def A ( self : Dict ) -> str: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*_A ) def A ( self : Tuple ) -> Any: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*_A ) def A ( self : List[str] ) -> int: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*_A ) def A ( self : Dict ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*_A ) def A ( self : Dict , _A : Union[str, Any] , _A : str , _A : str , _A : Union[str, Any] , _A : Tuple , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any: self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(_A ): # adds PAD dummy token UpperCAmelCase_ : Union[str, Any] = min_length + idx + 1 UpperCAmelCase_ : Union[str, Any] = min_length + idx + 1 UpperCAmelCase_ : Union[str, Any] = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) ) def A ( self : Any , _A : Optional[int] , _A : Union[str, Any] , _A : Any , _A : Optional[Any] , _A : List[str] , _A : Dict=False , _A : List[Any]=1 ) -> List[str]: self.assertIsInstance(_A , _A ) self.assertListEqual( [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , ) self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(_A ): # adds PAD dummy token UpperCAmelCase_ 
: Optional[Any] = min_length + idx + 1 UpperCAmelCase_ : str = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , ) pass @slow def A ( self : Tuple ) -> List[str]: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[str] = XLMModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_torch class snake_case__ ( unittest.TestCase): @slow def A ( self : Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(_A ) UpperCAmelCase_ : Tuple = torch.tensor([[14, 4_47]] , dtype=torch.long , device=_A ) # the president UpperCAmelCase_ : Union[str, Any] = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference UpperCAmelCase_ : Dict = model.generate(_A , do_sample=_A ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
304
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = StableDiffusionDiffEditPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} a_ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([]) def A ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : str , _A : List[str] , _A : Any=0 ) -> str: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Any = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : str = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, 
'''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Dict = torch.manual_seed(_A ) else: UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[Any] = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any: UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[int] = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def A ( self : List[str] ) -> Optional[Any]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Any = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase_ : str = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0] UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(_A , 1e-4 ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : Optional[Any] = '''cpu''' UpperCAmelCase_ : Any = self.get_dummy_components() UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A ) UpperCAmelCase_ : int = pipe.generate_mask(**_A ) UpperCAmelCase_ : Tuple = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase_ : List[Any] = np.array([0] * 9 ) UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = '''cpu''' UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : int = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) def A ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : Any = '''cpu''' UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A ) UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A ) UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase): def A ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) ) UpperCAmelCase_ : Any = raw_image def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Tuple = '''a bowl of pears''' UpperCAmelCase_ : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[str] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents UpperCAmelCase_ : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Dict = '''a bowl of pears''' UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents UpperCAmelCase_ : Dict = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
304
1
'''simple docstring''' import math from datetime import datetime, timedelta def __UpperCAmelCase ( A : int ) -> datetime: UpperCAmelCase_ : List[str] = year % 1_9 UpperCAmelCase_ : Dict = year % 4 UpperCAmelCase_ : int = year % 7 UpperCAmelCase_ : str = math.floor(year / 1_0_0 ) UpperCAmelCase_ : Union[str, Any] = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 ) UpperCAmelCase_ : str = leap_day_inhibits / 4 UpperCAmelCase_ : Optional[Any] = ( 1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 3_0 UpperCAmelCase_ : Union[str, Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 UpperCAmelCase_ : str = (1_9 * metonic_cycle + secular_moon_shift) % 3_0 # PHM -> Paschal Full Moon UpperCAmelCase_ : Tuple = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 2_9 and days_from_phm_to_sunday == 6: return datetime(A , 4 , 1_9 ) elif days_to_add == 2_8 and days_from_phm_to_sunday == 6: return datetime(A , 4 , 1_8 ) else: return datetime(A , 3 , 2_2 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_994, 2_000, 2_010, 2_021, 2_023): _UpperCamelCase : List[str] = 'will be' if year > datetime.now().year else 'was' print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
304
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ ( UpperCamelCase): def A ( self : List[str] ) -> List[Any]: UpperCAmelCase_ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(_A , '''num_heads''' ) ) class snake_case__ : def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]: UpperCAmelCase_ : int = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Tuple = patch_sizes UpperCAmelCase_ : int = patch_stride UpperCAmelCase_ : Any = patch_padding UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = num_labels UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Optional[int] = num_heads UpperCAmelCase_ : Tuple = stride_kv UpperCAmelCase_ : Optional[Any] = depth UpperCAmelCase_ : Dict = cls_token UpperCAmelCase_ : Dict = attention_drop_rate UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : List[str] = layer_norm_eps def A ( self : int ) -> List[str]: UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : List[str] = self.get_config() return config, pixel_values, labels def A ( self : List[str] ) -> int: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]: UpperCAmelCase_ : List[Any] = CvtModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Tuple = model(_A ) UpperCAmelCase_ : List[str] = (self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - 
self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : str = CvtForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : int = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Dict ) -> Any: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else () a_ = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False a_ = False def A ( self : int ) -> List[str]: UpperCAmelCase_ : Optional[int] = CvtModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def A ( self : Any ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : int ) -> List[str]: return @unittest.skip(reason='''Cvt does not output attentions''' ) def A ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def A ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def A ( self : List[Any] ) -> Any: pass def A ( self : int ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(_A ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Tuple = [*signature.parameters.keys()] UpperCAmelCase_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def A ( self : Dict ) -> List[str]: def check_hidden_states_output(_A : Dict , _A : str , _A : int ): UpperCAmelCase_ : str = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase_ : Optional[Any] = outputs.hidden_states UpperCAmelCase_ : Any = len(self.model_tester.depth ) self.assertEqual(len(_A ) , _A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, 
] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Dict = True check_hidden_states_output(_A , _A , _A ) def A ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A ( self : List[Any] ) -> Optional[Any]: pass @slow def A ( self : Optional[int] ) -> int: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case__ ( unittest.TestCase): @cached_property def A ( self : Union[str, Any] ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A ) UpperCAmelCase_ : Optional[int] = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Any = model(**_A ) # verify the logits UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
304
1
'''simple docstring''' import os def __UpperCAmelCase ( ) -> Optional[int]: with open(os.path.dirname(A ) + '''/grid.txt''' ) as f: UpperCAmelCase_ : Dict = [] # noqa: E741 for _ in range(2_0 ): l.append([int(A ) for x in f.readline().split()] ) UpperCAmelCase_ : str = 0 # right for i in range(2_0 ): for j in range(1_7 ): UpperCAmelCase_ : Optional[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase_ : List[Any] = temp # down for i in range(1_7 ): for j in range(2_0 ): UpperCAmelCase_ : Tuple = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase_ : List[Any] = temp # diagonal 1 for i in range(1_7 ): for j in range(1_7 ): UpperCAmelCase_ : Tuple = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp # diagonal 2 for i in range(1_7 ): for j in range(3 , 2_0 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase_ : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
304
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True}) a_ = Features({"text": Value("string")}) a_ = Features({}) a_ = "text" @property def A ( self : List[str] ) -> Dict[str, str]: return {self.text_column: "text"}
304
1
'''simple docstring''' def __UpperCAmelCase ( A : str ) -> str: UpperCAmelCase_ : Optional[int] = 0 # if input_string is "aba" than new_input_string become "a|b|a" UpperCAmelCase_ : Union[str, Any] = '''''' UpperCAmelCase_ : Optional[int] = '''''' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(A ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring UpperCAmelCase_ , UpperCAmelCase_ : Any = 0, 0 # length[i] shows the length of palindromic substring with center i UpperCAmelCase_ : str = [1 for i in range(len(A ) )] # for each character in new_string find corresponding palindromic string UpperCAmelCase_ : Dict = 0 for j in range(len(A ) ): UpperCAmelCase_ : str = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(A ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 UpperCAmelCase_ : str = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: UpperCAmelCase_ : Dict = j - k + 1 # noqa: E741 UpperCAmelCase_ : Union[str, Any] = j + k - 1 # update max_length and start position if max_length < length[j]: UpperCAmelCase_ : str = length[j] UpperCAmelCase_ : List[Any] = j # create that string UpperCAmelCase_ : str = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
304
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict: with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f: UpperCAmelCase_ : Union[str, Any] = json.load(A ) UpperCAmelCase_ : Optional[int] = {} UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : str = [] for key, info in class_info.items(): UpperCAmelCase_ : Tuple = info['''name'''] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(A ) ) UpperCAmelCase_ : Any = thing_ids UpperCAmelCase_ : Union[str, Any] = class_names return metadata class snake_case__ ( unittest.TestCase): def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Optional[int] = max_resolution UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size UpperCAmelCase_ : int = do_normalize UpperCAmelCase_ : List[Any] = image_mean UpperCAmelCase_ : Dict = image_std UpperCAmelCase_ : str = class_info_file UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A ) UpperCAmelCase_ : Tuple = num_text UpperCAmelCase_ : Union[str, Any] = repo_path # for the post_process_functions UpperCAmelCase_ : Any = 2 UpperCAmelCase_ : Dict = 10 UpperCAmelCase_ : int = 10 UpperCAmelCase_ : Optional[Any] = 3 UpperCAmelCase_ : str = 4 UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Union[str, Any] = do_reduce_labels UpperCAmelCase_ : str = ignore_index def A ( self : Dict ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]: if not batched: UpperCAmelCase_ : Any = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size else: UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase_ : int = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase_ : 
List[Any] = self.size['''shortest_edge'''] UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase_ : Dict = self.size['''shortest_edge'''] UpperCAmelCase_ : str = self.size['''shortest_edge'''] else: UpperCAmelCase_ : Dict = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width def A ( self : Tuple ) -> str: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string a_ = image_processing_class def A ( self : Optional[int] ) -> Any: UpperCAmelCase_ : int = OneFormerImageProcessorTester(self ) @property def A ( self : Any ) -> int: return self.image_processing_tester.prepare_image_processor_dict() def A ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''ignore_index''' ) ) self.assertTrue(hasattr(_A , '''class_info_file''' ) ) self.assertTrue(hasattr(_A , '''num_text''' ) ) self.assertTrue(hasattr(_A , '''repo_path''' ) ) self.assertTrue(hasattr(_A , '''metadata''' ) ) self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) ) def A ( self : Dict ) -> Dict: pass def A ( self : Tuple ) -> Dict: # Initialize image_processor UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : int = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : Tuple ) -> Tuple: # Initialize image_processor UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , 
np.ndarray ) # Test not batched input UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : Tuple = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : Dict ) -> Union[str, Any]: # Initialize image_processor UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : Optional[int] = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str: UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A ) if with_segmentation_maps: UpperCAmelCase_ : Any = num_labels if is_instance_map: UpperCAmelCase_ : Any = list(range(_A ) ) * 2 UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) ) UpperCAmelCase_ : Dict = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations] UpperCAmelCase_ : Tuple = image_processor( _A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , ) return inputs def A ( self : int ) -> str: pass def A ( self : Tuple ) -> Union[str, Any]: def common(_A : Optional[int]=False , _A : str=None ): UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs( with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A ) UpperCAmelCase_ : List[Any] = inputs['''mask_labels'''] UpperCAmelCase_ : Optional[Any] = inputs['''class_labels'''] UpperCAmelCase_ : int = inputs['''pixel_values'''] UpperCAmelCase_ : Tuple = inputs['''text_inputs'''] # check the batch_size for mask_label, class_label, 
text_input in zip(_A , _A , _A ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensures padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_A ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_A ) common(is_instance_map=_A , segmentation_type='''pil''' ) common(is_instance_map=_A , segmentation_type='''pil''' ) def A ( self : List[Any] ) -> List[Any]: UpperCAmelCase_ : int = np.zeros((20, 50) ) UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A ) self.assertEqual(len(_A ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def A ( self : Any ) -> List[Any]: UpperCAmelCase_ : int = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : Union[str, Any] = feature_extractor.post_process_semantic_segmentation(_A ) self.assertEqual(len(_A ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] UpperCAmelCase_ : Any = feature_extractor.post_process_semantic_segmentation(_A , target_sizes=_A ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def A ( self : Optional[Any] ) -> Tuple: UpperCAmelCase_ : Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 ) self.assertTrue(len(_A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , _A ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def A ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 ) self.assertTrue(len(_A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , _A ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
304
1
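The run-length-encoding assertions in the sample above are easier to follow with a toy encoder in hand. This is a minimal sketch of alternating-run encoding over the flattened mask (leading zero-run counted first); it is not transformers' actual binary_mask_to_rle, whose exact output convention may differ:

import numpy as np

def binary_mask_to_rle_sketch(mask):
    # Record alternating run lengths over the flattened mask,
    # starting with the count of leading zeros (0 if none).
    runs, prev, count = [], 0, 0
    for pixel in mask.flatten():
        if pixel != prev:
            runs.append(count)
            prev, count = pixel, 0
        count += 1
    runs.append(count)
    return runs

mask = np.zeros((20, 50))
mask[0, 21:45] = 1
print(binary_mask_to_rle_sketch(mask))  # [21, 24, 955]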
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = ReformerTokenizer a_ = ReformerTokenizerFast a_ = True a_ = False a_ = True def A ( self : Optional[Any] ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[Any] = '''<s>''' UpperCAmelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 10_00 ) def A ( self : Optional[int] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def A ( self : Optional[Any] ) -> List[Any]: if not self.test_rust_tokenizer: return UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(_A ) UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def A ( self : Tuple , _A : Dict=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase_ : Optional[int] = '''This is a simple input''' UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase_ : Dict = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , 
) def A ( self : Union[str, Any] ) -> int: pass def A ( self : int ) -> Any: UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A ) UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : List[str] ) -> Optional[int]: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : Tuple = '''Hello World!''' UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def A ( self : List[Any] ) -> str: UpperCAmelCase_ : Tuple = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase_ : int = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @require_torch @slow def A ( self : List[str] ) -> Optional[int]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ : List[Any] = ''' '''.join(_A ) UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' ) UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) UpperCAmelCase_ : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape UpperCAmelCase_ : Optional[int] = ReformerModel(_A ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_A ) model(**_A ) @slow def A ( self : int ) -> Optional[Any]: # fmt: off UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 UpperCAmelCase_ : Optional[Any] = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
304
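The Reformer test above leans on SentencePiece's word-boundary marker, SPIECE_UNDERLINE (U+2581 '▁'), which prefixes each word-initial sub-token. A hedged sketch of how such tokens map back to text, using the same hand-picked tokens as the test rather than a real tokenizer:

SPIECE_UNDERLINE = "\u2581"  # '▁', marks the start of a word

def detokenize(tokens):
    # Concatenate sub-tokens, then turn boundary markers back into spaces.
    return "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()

tokens = [SPIECE_UNDERLINE + "This", SPIECE_UNDERLINE + "is",
          SPIECE_UNDERLINE + "a", SPIECE_UNDERLINE + "t", "est"]
print(detokenize(tokens))  # This is a test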
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py _UpperCamelCase : Optional[int] = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. _UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') _UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) _UpperCamelCase : List[str] = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 
'zero-shot-image-classification', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def __UpperCAmelCase ( A : Optional[int] ) -> int: UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A ) return [m.group(0 ) for m in matches] def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCAmelCase_ : Optional[Any] = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. UpperCAmelCase_ : Dict = collections.defaultdict(A ) UpperCAmelCase_ : str = collections.defaultdict(A ) UpperCAmelCase_ : int = collections.defaultdict(A ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(A ): UpperCAmelCase_ : int = None if _re_tf_models.match(A ) is not None: UpperCAmelCase_ : Optional[Any] = tf_models UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0] elif _re_flax_models.match(A ) is not None: UpperCAmelCase_ : int = flax_models UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0] elif _re_pt_models.match(A ) is not None: UpperCAmelCase_ : Union[str, Any] = pt_models UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0] if lookup_dict is not None: while len(A ) > 0: if attr_name in model_prefix_to_model_type: UpperCAmelCase_ : Optional[int] = True break # Try again after removing the last word in the name UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] ) UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) UpperCAmelCase_ : List[Any] = list(A ) all_models.sort() UpperCAmelCase_ : Dict = {'''model_type''': all_models} UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models] UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models] UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure UpperCAmelCase_ : int = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: UpperCAmelCase_ : Any = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: UpperCAmelCase_ : int = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
UpperCAmelCase_ : Dict = '''AutoTokenizer''' UpperCAmelCase_ : str = [processors[t] for t in all_models] return pd.DataFrame(A ) def __UpperCAmelCase ( A : Optional[int] ) -> str: UpperCAmelCase_ : int = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"] UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(A , A , A ): # The type of pipeline may not exist in this framework if not hasattr(A , A ): continue # First extract all model_names UpperCAmelCase_ : List[str] = [] for name in getattr(A , A ).values(): if isinstance(A , A ): model_names.append(A ) else: model_names.extend(list(A ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __UpperCAmelCase ( A : int , A : Any ) -> Tuple: UpperCAmelCase_ : Tuple = get_frameworks_table() UpperCAmelCase_ : Any = Dataset.from_pandas(A ) UpperCAmelCase_ : str = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A ) UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A ) UpperCAmelCase_ : Optional[int] = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(A ) ) } UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() ) UpperCAmelCase_ : Optional[Any] = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) UpperCAmelCase_ : Dict = Dataset.from_pandas(A ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) ) if commit_sha is not None: UpperCAmelCase_ : List[str] = ( F"Update with commit {commit_sha}\n\nSee: " F"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: UpperCAmelCase_ : int = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , ) def __UpperCAmelCase ( ) -> int: UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS UpperCAmelCase_ : List[str] = [] for key in pipeline_tasks: if key not in in_table: UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt'''] if isinstance(A , (list, tuple) ): UpperCAmelCase_ : Dict = model[0] UpperCAmelCase_ : Any = model.__name__ if model not in in_table.values(): missing.append(A ) if len(A ) > 0: UpperCAmelCase_ : List[Any] = ''', '''.join(A ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' F"`utils/update_metadata.py`: {msg}. Please add them!" 
) if __name__ == "__main__": _UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.') parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.') parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.') _UpperCamelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
304
1
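The metadata script above splits CamelCase model names with a lookaround regex. A standalone demonstration of that exact pattern (the model name is chosen for illustration):

import re

_re_camel = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")

def camel_case_split(identifier):
    # Each lazy match ends at a lowercase->uppercase boundary, at an
    # acronym-to-word boundary, or at the end of the string.
    return [m.group(0) for m in _re_camel.finditer(identifier)]

print(camel_case_split("TFBertForMaskedLM"))  # ['TF', 'Bert', 'For', 'Masked', 'LM']

The script then drops the last word with ''.join(camel_case_split(name)[:-1]) when it retries a prefix lookup against the model-type table.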
'''simple docstring''' from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class snake_case__ ( UpperCamelCase , UpperCamelCase): a_ = "pixel_values" a_ = False a_ = TimmBackboneConfig def __init__( self : Any , _A : Optional[Any] , **_A : Tuple ) -> Union[str, Any]: requires_backends(self , '''timm''' ) super().__init__(_A ) UpperCAmelCase_ : List[str] = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(F"backbone {config.backbone} is not supported by timm." ) if hasattr(_A , '''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) UpperCAmelCase_ : Optional[int] = getattr(_A , '''use_pretrained_backbone''' , _A ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. UpperCAmelCase_ : List[str] = config.out_indices if getattr(_A , '''out_indices''' , _A ) is not None else (-1,) UpperCAmelCase_ : Union[str, Any] = timm.create_model( config.backbone , pretrained=_A , features_only=config.features_only , in_chans=config.num_channels , out_indices=_A , **_A , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
UpperCAmelCase_ : Union[str, Any] = self._backbone.return_layers UpperCAmelCase_ : Union[str, Any] = {layer['''module''']: str(_A ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(_A ) @classmethod def A ( cls : Dict , _A : Optional[Any] , *_A : Dict , **_A : int ) -> Optional[Any]: requires_backends(cls , ['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig UpperCAmelCase_ : Optional[int] = kwargs.pop('''config''' , TimmBackboneConfig() ) UpperCAmelCase_ : int = kwargs.pop('''use_timm_backbone''' , _A ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) UpperCAmelCase_ : Optional[int] = kwargs.pop('''num_channels''' , config.num_channels ) UpperCAmelCase_ : Optional[int] = kwargs.pop('''features_only''' , config.features_only ) UpperCAmelCase_ : int = kwargs.pop('''use_pretrained_backbone''' , config.use_pretrained_backbone ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop('''out_indices''' , config.out_indices ) UpperCAmelCase_ : Tuple = TimmBackboneConfig( backbone=_A , num_channels=_A , features_only=_A , use_pretrained_backbone=_A , out_indices=_A , ) return super()._from_config(_A , **_A ) def A ( self : List[str] , _A : Optional[Any] ) -> Any: pass def A ( self : Optional[int] , _A : Tuple , _A : List[Any]=None , _A : Any=None , _A : Any=None , **_A : List[Any] ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: UpperCAmelCase_ : str = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone UpperCAmelCase_ : int = self._all_layers UpperCAmelCase_ : Optional[Any] = self._backbone(_A , **_A ) UpperCAmelCase_ : Optional[int] = self._return_layers UpperCAmelCase_ : List[str] = tuple(hidden_states[i] for i in self.out_indices ) else: UpperCAmelCase_ : List[Any] = self._backbone(_A , **_A ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Optional[int] = tuple(_A ) UpperCAmelCase_ : str = tuple(_A ) if hidden_states is not None else None if not return_dict: UpperCAmelCase_ : List[str] = (feature_maps,) if output_hidden_states: UpperCAmelCase_ : Optional[int] = output + (hidden_states,) return output return BackboneOutput(feature_maps=_A , hidden_states=_A , attentions=_A )
304
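The TimmBackbone wrapper above ultimately reduces to a features_only call into timm. A minimal sketch of that call, assuming timm is installed and "resnet18" appears in timm.list_models(); out_indices=(4,) picks the final stage of this model, where the wrapper instead defaults to (-1,):

import timm

backbone = timm.create_model(
    "resnet18",
    pretrained=False,    # skip the weight download for this sketch
    features_only=True,  # return intermediate feature maps, not logits
    out_indices=(4,),    # final stage only
)
print(backbone.feature_info.channels())  # channel count per returned stage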
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) _UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case__ : a_ = field( default=UpperCamelCase , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"}) a_ = field( default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) a_ = field( default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class snake_case__ : a_ = field( default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."}) a_ = field( default=UpperCamelCase , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."}) a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."}) a_ = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}) a_ = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) a_ = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}) a_ = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"}) def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]: def _dataset(A : Dict , A : str=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , ) return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size ) else: return TextDataset( tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def __UpperCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab.
if model_args.config_name: UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another''' ''' script, save it,and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A ) model.resize_token_embeddings(len(A ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the''' '''--mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: UpperCAmelCase_ : List[str] = tokenizer.max_len # Our input block size will be the max possible for the model else: UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len ) # Get datasets UpperCAmelCase_ : str = ( get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) UpperCAmelCase_ : Any = ( get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling( tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask( tokenizer=A , mlm_probability=data_args.mlm_probability ) else: UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling( tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer UpperCAmelCase_ : Any = Trainer( model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , ) # Training if training_args.do_train: UpperCAmelCase_ : List[str] = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=A ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase_ : Tuple = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase_ : Dict = trainer.evaluate() UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) UpperCAmelCase_ : Optional[int] = 
{'''perplexity''': perplexity} UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(A , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , A , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(A ) return results def __UpperCAmelCase ( A : Tuple ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
304
1
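The evaluation block in the script above reports perplexity as the exponential of the mean eval loss returned by trainer.evaluate(); the computation itself is one line (the loss value below is made up):

import math

eval_loss = 2.31  # hypothetical mean cross-entropy from trainer.evaluate()
perplexity = math.exp(eval_loss)
print(f"perplexity = {perplexity:.2f}")  # perplexity = 10.07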
'''simple docstring''' import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __UpperCAmelCase ( A : Optional[Any] , A : List[Any] ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' UpperCAmelCase_ : Union[str, Any] = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' ) UpperCAmelCase_ : List[Any] = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) UpperCAmelCase_ : str = transform(A ).unsqueeze(0 ).to(A ) return image def __UpperCAmelCase ( A : Union[str, Any] ) -> Optional[int]: if "visual_encoder" in key: UpperCAmelCase_ : List[Any] = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , A ) if "blocks" in key: UpperCAmelCase_ : Optional[Any] = re.sub(r'''blocks''' , '''layers''' , A ) if "attn" in key: UpperCAmelCase_ : List[str] = re.sub(r'''attn''' , '''self_attn''' , A ) if "norm1" in key: UpperCAmelCase_ : Union[str, Any] = re.sub(r'''norm1''' , '''layer_norm1''' , A ) if "norm2" in key: UpperCAmelCase_ : Any = re.sub(r'''norm2''' , '''layer_norm2''' , A ) if "encoder.norm" in key: UpperCAmelCase_ : Optional[Any] = re.sub(r'''encoder.norm''' , '''post_layernorm''' , A ) if "encoder.patch_embed.proj" in key: UpperCAmelCase_ : Dict = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , A ) if "encoder.pos_embed" in key: UpperCAmelCase_ : List[str] = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , A ) if "encoder.cls_token" in key: UpperCAmelCase_ : str = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , A ) if "self_attn" in key: UpperCAmelCase_ : List[str] = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , A ) return key @torch.no_grad() def __UpperCAmelCase ( A : Tuple , A : Optional[Any]=None ) -> int: if config_path is not None: UpperCAmelCase_ : List[Any] = BlipConfig.from_pretrained(A ) else: UpperCAmelCase_ : Optional[Any] = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} ) UpperCAmelCase_ : List[Any] = BlipForConditionalGeneration(A ).eval() UpperCAmelCase_ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' UpperCAmelCase_ : Any = blip_decoder(pretrained=A , image_size=3_8_4 , vit='''base''' ) UpperCAmelCase_ : int = pt_model.eval() UpperCAmelCase_ : int = pt_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ : List[Any] = modified_state_dict.pop(A ) UpperCAmelCase_ : Optional[Any] = rename_key(A ) UpperCAmelCase_ : Tuple = value hf_model.load_state_dict(A ) UpperCAmelCase_ : Dict = 3_8_4 UpperCAmelCase_ : str = load_demo_image(image_size=A , device='''cpu''' ) UpperCAmelCase_ : str = BertTokenizer.from_pretrained('''bert-base-uncased''' ) UpperCAmelCase_ : Dict = tokenizer(['''a picture of'''] ).input_ids UpperCAmelCase_ : Union[str, Any] = hf_model.generate(A , A ) assert out[0].tolist() == 
[3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] UpperCAmelCase_ : Union[str, Any] = hf_model.generate(A ) assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(A ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' UpperCAmelCase_ : Optional[int] = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) UpperCAmelCase_ : Tuple = blip_vqa(pretrained=A , image_size=A , vit='''base''' ) vqa_model.eval() UpperCAmelCase_ : Union[str, Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ : int = modified_state_dict.pop(A ) UpperCAmelCase_ : Any = rename_key(A ) UpperCAmelCase_ : Union[str, Any] = value UpperCAmelCase_ : Dict = BlipForQuestionAnswering(A ) hf_vqa_model.load_state_dict(A ) UpperCAmelCase_ : Optional[int] = ['''How many dogs are in this image?'''] UpperCAmelCase_ : Optional[int] = tokenizer(A , return_tensors='''pt''' ).input_ids UpperCAmelCase_ : Any = hf_vqa_model.generate(A , A ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) UpperCAmelCase_ : List[str] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' UpperCAmelCase_ : int = blip_itm(pretrained=A , image_size=A , vit='''base''' ) itm_model.eval() UpperCAmelCase_ : Any = itm_model.state_dict() for key in modified_state_dict.copy(): UpperCAmelCase_ : Optional[Any] = modified_state_dict.pop(A ) UpperCAmelCase_ : Tuple = rename_key(A ) UpperCAmelCase_ : Tuple = value UpperCAmelCase_ : int = BlipForImageTextRetrieval(A ) UpperCAmelCase_ : List[str] = ['''A picture of a woman with a dog sitting in a beach'''] UpperCAmelCase_ : Optional[int] = tokenizer( A , return_tensors='''pt''' , padding='''max_length''' , truncation=A , max_length=3_5 , ).input_ids hf_itm_model.load_state_dict(A ) hf_itm_model.eval() UpperCAmelCase_ : List[Any] = hf_itm_model(A , A , use_itm_head=A ) UpperCAmelCase_ : List[str] = hf_itm_model(A , A , use_itm_head=A ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": _UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') _UpperCamelCase : Dict = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
304
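The BLIP conversion above follows a common checkpoint-porting idiom: pop every key, rewrite it with re.sub, and re-insert the tensor under the new name. A self-contained sketch of that loop with made-up key names and only two of the rewrites:

import re

def rename_key(key):
    # Two illustrative rewrites in the spirit of the conversion script.
    key = re.sub(r"blocks", "layers", key)
    key = re.sub(r"attn", "self_attn", key)
    return key

state_dict = {"visual_encoder.blocks.0.attn.proj.weight": "tensor-placeholder"}
for key in list(state_dict):  # list() so we can mutate while iterating
    state_dict[rename_key(key)] = state_dict.pop(key)
print(state_dict)
# {'visual_encoder.layers.0.self_attn.proj.weight': 'tensor-placeholder'}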
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class snake_case__ ( unittest.TestCase): @classmethod def A ( cls : Optional[int] ) -> Tuple: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(_A ) @classmethod def A ( cls : int ) -> Tuple: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def A ( self : Dict ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : List[str] = FlaxBertModel(_A ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Tuple = 
flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params ) UpperCAmelCase_ : str = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: UpperCAmelCase_ : int = False return models_are_equal @require_flax class snake_case__ ( unittest.TestCase): def A ( self : Any ) -> Any: UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Any = FlaxBertModel(_A ) UpperCAmelCase_ : Tuple = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) ) with self.assertRaises(_A ): UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Tuple: UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Tuple = FlaxBertModel(_A ) UpperCAmelCase_ : str = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' ) with self.assertRaises(_A ): UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Optional[int]: UpperCAmelCase_ : int = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A )
304
1
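The hub tests above compare two parameter trees by flattening them and summing absolute differences per leaf. The same idiom on plain nested dicts of numpy arrays, assuming flax is installed (the trees here are toy data, not real model params):

import numpy as np
from flax.traverse_util import flatten_dict

params_a = {"layer": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
params_b = {"layer": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}

flat_a, flat_b = flatten_dict(params_a), flatten_dict(params_b)
for key in flat_a:
    diff = np.abs(flat_a[key] - flat_b[key]).sum()
    assert diff <= 1e-3, f"{key} not identical"
print("parameter trees match")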
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_UpperCamelCase : Union[str, Any] = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : List[Any] = ['SpeechEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Optional[int] = ['FlaxSpeechEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    _UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
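The _import_structure pattern in the row above exists so that heavy submodules load only on first use. A stripped-down illustration of the same deferred-import idea — this is not transformers' actual _LazyModule, just the core mechanism:

import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        # Import on first attribute access, then delegate to the real module.
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json = LazyModule("json")
print(json.dumps({"lazy": True}))  # the real import happens here, not above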
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
304
1
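The mapping in the row above drives a simple placeholder substitution when doc notebooks are generated; applying it is a plain str.replace loop (the snippet text below is invented):

replacements = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}

text = "Load {model_class} together with {processor_class}."
for placeholder, value in replacements.items():
    text = text.replace(placeholder, value)
print(text)  # Load FakeModelClass together with FakeProcessorClass.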
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : List[str] = [ {'dataset': 'wikipedia', 'config_name': '20220301.de'}, {'dataset': 'wikipedia', 'config_name': '20220301.en'}, {'dataset': 'wikipedia', 'config_name': '20220301.fr'}, {'dataset': 'wikipedia', 'config_name': '20220301.frr'}, {'dataset': 'wikipedia', 'config_name': '20220301.it'}, {'dataset': 'wikipedia', 'config_name': '20220301.simple'}, {'dataset': 'snli', 'config_name': 'plain_text'}, {'dataset': 'eli5', 'config_name': 'LFQA_reddit'}, {'dataset': 'wiki40b', 'config_name': 'en'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'}, {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'}, {'dataset': 'natural_questions', 'config_name': 'default'}, ] def __UpperCAmelCase ( A : Optional[Any]=True ) -> Union[str, Any]: if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=UpperCamelCase)) class snake_case__ ( UpperCamelCase): a_ = None a_ = None def A ( self : Optional[int] , _A : Any , _A : Any ) -> Union[str, Any]: with TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Optional[Any] = dataset_module_factory(_A , cache_dir=_A ) UpperCAmelCase_ : int = import_main_class(dataset_module.module_path , dataset=_A ) UpperCAmelCase_ : DatasetBuilder = builder_cls( cache_dir=_A , config_name=_A , hash=dataset_module.hash , ) UpperCAmelCase_ : Optional[Any] = '''/'''.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=_A ).replace(os.sep , '''/''' ), config.DATASET_INFO_FILENAME, ] ) UpperCAmelCase_ : Union[str, Any] = cached_path(_A , cache_dir=_A ) self.assertTrue(os.path.exists(_A ) ) @pytest.mark.integration def __UpperCAmelCase ( A : int ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple''' UpperCAmelCase_ : Union[str, Any] = dataset_module_factory('''wikipedia''' , cache_dir=A ) UpperCAmelCase_ : Optional[int] = import_main_class(dataset_module.module_path ) UpperCAmelCase_ : DatasetBuilder = builder_cls( cache_dir=A , config_name='''20220301.frr''' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam UpperCAmelCase_ : List[str] = None builder_instance.download_and_prepare() UpperCAmelCase_ : int = builder_instance.as_dataset() assert ds @pytest.mark.integration def __UpperCAmelCase ( A : Optional[int] ) -> int: UpperCAmelCase_ : List[str] = dataset_module_factory('''wikipedia''' , cache_dir=A ) UpperCAmelCase_ : Optional[Any] = import_main_class(dataset_module.module_path , dataset=A ) UpperCAmelCase_ : DatasetBuilder = builder_cls( cache_dir=A , config_name='''20220301.frr''' , hash=dataset_module.hash 
, ) UpperCAmelCase_ : str = builder_instance.as_streaming_dataset() assert ds assert isinstance(A , A ) assert "train" in ds assert isinstance(ds['''train'''] , A ) assert next(iter(ds['''train'''] ) )
304
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]: if isinstance(A , A ): UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A ) else: UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A ) for i, tensor in enumerate(A ): if padding_side == "right": if isinstance(A , A ): UpperCAmelCase_ : Tuple = tensor[:sequence_length] else: UpperCAmelCase_ : Dict = tensor[:sequence_length] else: if isinstance(A , A ): UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length] else: UpperCAmelCase_ : int = tensor[:sequence_length] return out_tensor.tolist() def __UpperCAmelCase ( A : List[Any] ) -> str: UpperCAmelCase_ : Dict = ord(A ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A ) if cat.startswith('''P''' ): return True return False @dataclass class snake_case__ ( UpperCamelCase): a_ = 42 a_ = True a_ = None a_ = None a_ = -100 a_ = "pt" def A ( self : List[Any] , _A : Dict ) -> Tuple: import torch UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels''' UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None UpperCAmelCase_ : Tuple = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1] UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": UpperCAmelCase_ : Optional[Any] = [ list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels ] else: UpperCAmelCase_ : Any = [ [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels ] UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features] UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A ) UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features] UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A ) UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()} return batch
304
1
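The collator above right- or left-pads ragged per-example label lists to a common sequence length, using -100 as the label pad id. A minimal numpy sketch of the right-padding branch (the function name is illustrative):

import numpy as np

def pad_right(sequences, pad_value, sequence_length):
    # Fill with the pad value, then copy each (possibly truncated) sequence in.
    out = np.full((len(sequences), sequence_length), pad_value)
    for i, seq in enumerate(sequences):
        out[i, : len(seq)] = seq[:sequence_length]
    return out.tolist()

print(pad_right([[1, 2], [3, 4, 5]], -100, 4))  # [[1, 2, -100, -100], [3, 4, 5, -100]]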
'''simple docstring''' from math import factorial, radians def __UpperCAmelCase ( A : float , A : int = 1_8 , A : int = 1_0 ) -> float: UpperCAmelCase_ : Optional[int] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians UpperCAmelCase_ : Optional[Any] = radians(A ) UpperCAmelCase_ : Optional[Any] = angle_in_radians UpperCAmelCase_ : Optional[int] = 3 UpperCAmelCase_ : Tuple = -1 for _ in range(A ): result += (b * (angle_in_radians**a)) / factorial(A ) UpperCAmelCase_ : List[Any] = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(A , A ) if __name__ == "__main__": __import__('doctest').testmod()
304
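The file above evaluates sin(x) from its Maclaurin series after reducing the angle into [0, 360). An equivalent, more direct sketch:

from math import factorial, radians

def maclaurin_sin(angle_deg: float, terms: int = 18) -> float:
    x = radians(angle_deg % 360.0)
    # sin(x) = x - x**3/3! + x**5/5! - ...
    return sum((-1) ** n * x ** (2 * n + 1) / factorial(2 * n + 1) for n in range(terms))

print(round(maclaurin_sin(30), 10))  # ~0.5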
'''simple docstring''' import functools def __UpperCAmelCase ( A : str , A : str ) -> int: UpperCAmelCase_ : Optional[Any] = len(A ) UpperCAmelCase_ : List[str] = len(A ) @functools.cache def min_distance(A : int , A : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , A ) , 1 + min_distance(A , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
304
1
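The file above is a memoized Levenshtein distance where insert, delete, and substitute each cost 1. A self-contained sketch with the same recurrence:

import functools

def edit_distance(a: str, b: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i == len(a):  # only insertions of b's remainder are left
            return len(b) - j
        if j == len(b):  # only deletions of a's remainder are left
            return len(a) - i
        diff = int(a[i] != b[j])
        return min(1 + go(i + 1, j), 1 + go(i, j + 1), diff + go(i + 1, j + 1))
    return go(0, 0)

print(edit_distance("kitten", "sitting"))  # 3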
'''simple docstring''' from __future__ import annotations import math import random from typing import Any class snake_case__ : def __init__( self : List[Any] ) -> None: UpperCAmelCase_ : list[Any] = [] UpperCAmelCase_ : int = 0 UpperCAmelCase_ : int = 0 def A ( self : Optional[Any] ) -> bool: return self.head == self.tail def A ( self : List[str] , _A : Any ) -> None: self.data.append(_A ) UpperCAmelCase_ : Optional[int] = self.tail + 1 def A ( self : str ) -> Any: UpperCAmelCase_ : Optional[int] = self.data[self.head] UpperCAmelCase_ : Optional[int] = self.head + 1 return ret def A ( self : Any ) -> int: return self.tail - self.head def A ( self : Any ) -> None: print(self.data ) print('''**************''' ) print(self.data[self.head : self.tail] ) class snake_case__ : def __init__( self : Optional[Any] , _A : Any ) -> None: UpperCAmelCase_ : int = data UpperCAmelCase_ : MyNode | None = None UpperCAmelCase_ : MyNode | None = None UpperCAmelCase_ : int = 1 def A ( self : Union[str, Any] ) -> Any: return self.data def A ( self : List[str] ) -> MyNode | None: return self.left def A ( self : Union[str, Any] ) -> MyNode | None: return self.right def A ( self : Union[str, Any] ) -> int: return self.height def A ( self : List[Any] , _A : Any ) -> None: UpperCAmelCase_ : Dict = data def A ( self : Optional[Any] , _A : MyNode | None ) -> None: UpperCAmelCase_ : Dict = node def A ( self : Optional[Any] , _A : MyNode | None ) -> None: UpperCAmelCase_ : int = node def A ( self : int , _A : int ) -> None: UpperCAmelCase_ : List[str] = height def __UpperCAmelCase ( A : MyNode | None ) -> int: if node is None: return 0 return node.get_height() def __UpperCAmelCase ( A : int , A : int ) -> int: if a > b: return a return b def __UpperCAmelCase ( A : MyNode ) -> MyNode: print('''left rotation node:''' , node.get_data() ) UpperCAmelCase_ : Optional[Any] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(A ) UpperCAmelCase_ : List[str] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) UpperCAmelCase_ : int = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(A ) return ret def __UpperCAmelCase ( A : MyNode ) -> MyNode: print('''right rotation node:''' , node.get_data() ) UpperCAmelCase_ : str = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(A ) UpperCAmelCase_ : str = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) UpperCAmelCase_ : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(A ) return ret def __UpperCAmelCase ( A : MyNode ) -> MyNode: UpperCAmelCase_ : int = node.get_left() assert left_child is not None node.set_left(left_rotation(A ) ) return right_rotation(A ) def __UpperCAmelCase ( A : MyNode ) -> MyNode: UpperCAmelCase_ : str = node.get_right() assert right_child is not None node.set_right(right_rotation(A ) ) return left_rotation(A ) def __UpperCAmelCase ( A : MyNode | None , A : Any ) -> MyNode | None: if node is None: return MyNode(A ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , A ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected UpperCAmelCase_ : str = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child UpperCAmelCase_ : List[Any] = right_rotation(A ) else: UpperCAmelCase_ : Tuple = 
lr_rotation(A ) else: node.set_right(insert_node(node.get_right() , A ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: UpperCAmelCase_ : List[str] = node.get_right() assert right_child is not None if data < right_child.get_data(): UpperCAmelCase_ : List[str] = rl_rotation(A ) else: UpperCAmelCase_ : Dict = left_rotation(A ) UpperCAmelCase_ : List[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(A ) return node def __UpperCAmelCase ( A : MyNode ) -> Any: while True: UpperCAmelCase_ : Dict = root.get_right() if right_child is None: break UpperCAmelCase_ : Union[str, Any] = right_child return root.get_data() def __UpperCAmelCase ( A : MyNode ) -> Any: while True: UpperCAmelCase_ : Union[str, Any] = root.get_left() if left_child is None: break UpperCAmelCase_ : int = left_child return root.get_data() def __UpperCAmelCase ( A : MyNode , A : Any ) -> MyNode | None: UpperCAmelCase_ : Any = root.get_left() UpperCAmelCase_ : List[str] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: UpperCAmelCase_ : Tuple = get_left_most(A ) root.set_data(A ) root.set_right(del_node(A , A ) ) elif left_child is not None: UpperCAmelCase_ : int = left_child elif right_child is not None: UpperCAmelCase_ : Any = right_child else: return None elif root.get_data() > data: if left_child is None: print('''No such data''' ) return root else: root.set_left(del_node(A , A ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(A , A ) ) if get_height(A ) - get_height(A ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): UpperCAmelCase_ : Union[str, Any] = left_rotation(A ) else: UpperCAmelCase_ : Optional[Any] = rl_rotation(A ) elif get_height(A ) - get_height(A ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): UpperCAmelCase_ : Optional[Any] = right_rotation(A ) else: UpperCAmelCase_ : Optional[int] = lr_rotation(A ) UpperCAmelCase_ : List[Any] = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(A ) return root class snake_case__ : def __init__( self : List[str] ) -> None: UpperCAmelCase_ : MyNode | None = None def A ( self : Dict ) -> int: return get_height(self.root ) def A ( self : str , _A : Any ) -> None: print('''insert:''' + str(_A ) ) UpperCAmelCase_ : Optional[int] = insert_node(self.root , _A ) def A ( self : Optional[Any] , _A : Any ) -> None: print('''delete:''' + str(_A ) ) if self.root is None: print('''Tree is empty!''' ) return UpperCAmelCase_ : Union[str, Any] = del_node(self.root , _A ) def __str__( self : Union[str, Any] , ) -> str: # a level traversale, gives a more intuitive look on the tree UpperCAmelCase_ : List[Any] = '''''' UpperCAmelCase_ : Optional[int] = MyQueue() q.push(self.root ) UpperCAmelCase_ : List[str] = self.get_height() if layer == 0: return output UpperCAmelCase_ : Tuple = 0 while not q.is_empty(): UpperCAmelCase_ : str = q.pop() UpperCAmelCase_ : Dict = ''' ''' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(_A ) q.push(_A ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space UpperCAmelCase_ : Optional[int] = cnt + 1 for i in range(1_00 ): if cnt == math.pow(2 , _A ) - 1: UpperCAmelCase_ : str = layer - 1 if layer == 0: output += 
"\n*************************************" return output output += "\n" break output += "\n*************************************" return output def __UpperCAmelCase ( ) -> None: import doctest doctest.testmod() if __name__ == "__main__": _test() _UpperCamelCase : List[Any] = AVLtree() _UpperCamelCase : int = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
304
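The file above is an AVL tree whose insert path picks the LL/LR/RL/RR rotation by comparing the inserted key, while the delete path compares child heights. The same decision expressed through balance factors — a sketch of the invariant, not the file's exact logic:

def rotation_needed(balance: int, child_balance: int) -> str:
    # balance = height(left) - height(right); rebalance when |balance| == 2
    if balance == 2:  # left-heavy
        return "right" if child_balance >= 0 else "left-right"
    if balance == -2:  # right-heavy
        return "left" if child_balance <= 0 else "right-left"
    return "none"

print(rotation_needed(2, 1), rotation_needed(-2, 1))  # right right-left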
'''simple docstring''' def __UpperCAmelCase ( A : int = 1_0_0_0 ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = 1, 1 UpperCAmelCase_ : Dict = [] for i in range(1 , n + 1 ): UpperCAmelCase_ : Optional[int] = prev_numerator + 2 * prev_denominator UpperCAmelCase_ : Tuple = prev_numerator + prev_denominator if len(str(A ) ) > len(str(A ) ): result.append(A ) UpperCAmelCase_ : Optional[Any] = numerator UpperCAmelCase_ : Optional[int] = denominator return len(A ) if __name__ == "__main__": print(f'''{solution() = }''')
304
1
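The file above is Project Euler 57: count the continued-fraction expansions of sqrt(2) whose numerator has more digits than the denominator, via the recurrence n' = n + 2d, d' = n + d. A compact equivalent:

def solution(limit: int = 1000) -> int:
    numerator, denominator, count = 1, 1, 0
    for _ in range(limit):
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator)) > len(str(denominator)):
            count += 1
    return count

print(solution())  # 153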
'''simple docstring''' import cva import numpy as np class snake_case__ : def __init__( self : Optional[int] , _A : float , _A : int ) -> str: if k in (0.04, 0.06): UpperCAmelCase_ : Optional[int] = k UpperCAmelCase_ : Dict = window_size else: raise ValueError('''invalid k value''' ) def __str__( self : List[Any] ) -> str: return str(self.k ) def A ( self : Tuple , _A : str ) -> tuple[cva.Mat, list[list[int]]]: UpperCAmelCase_ : Union[str, Any] = cva.imread(_A , 0 ) UpperCAmelCase_ , UpperCAmelCase_ : int = img.shape UpperCAmelCase_ : list[list[int]] = [] UpperCAmelCase_ : Optional[Any] = img.copy() UpperCAmelCase_ : List[Any] = cva.cvtColor(_A , cva.COLOR_GRAY2RGB ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = np.gradient(_A ) UpperCAmelCase_ : str = dx**2 UpperCAmelCase_ : Any = dy**2 UpperCAmelCase_ : Any = dx * dy UpperCAmelCase_ : Optional[int] = 0.04 UpperCAmelCase_ : Dict = self.window_size // 2 for y in range(_A , h - offset ): for x in range(_A , w - offset ): UpperCAmelCase_ : List[str] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase_ : int = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase_ : List[str] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() UpperCAmelCase_ : List[str] = (wxx * wyy) - (wxy**2) UpperCAmelCase_ : List[Any] = wxx + wyy UpperCAmelCase_ : int = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 2_55 ) return color_img, corner_list if __name__ == "__main__": _UpperCamelCase : Dict = HarrisCorner(0.04, 3) _UpperCamelCase , _UpperCamelCase : Union[str, Any] = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
304
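The detector above computes the Harris response R = det(M) - k * trace(M)**2 from windowed sums of gradient products. A vectorized sketch; using scipy's uniform_filter for the window sums is an assumption made here for brevity:

import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(img: np.ndarray, k: float = 0.04, window: int = 3) -> np.ndarray:
    dy, dx = np.gradient(img.astype(float))
    # uniform_filter returns window means, so scale back to window sums
    wxx = uniform_filter(dx * dx, size=window) * window**2
    wyy = uniform_filter(dy * dy, size=window) * window**2
    wxy = uniform_filter(dx * dy, size=window) * window**2
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2  # threshold (e.g. r > 0.5 above) to pick corners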
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : Optional[Any] = do_center_crop UpperCAmelCase_ : Optional[int] = crop_size UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Optional[Any] = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : List[Any] = do_reduce_labels def A ( self : Union[str, Any] ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCAmelCase ( ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] ) UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCAmelCase ( ) -> Any: UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] ) UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] ) UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] ) UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = BeitImageProcessor if is_vision_available() else None def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> Optional[int]: 
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _A ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _A ) def A ( self : Optional[Any] ) -> Any: pass def A ( self : List[str] ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) UpperCAmelCase_ : Union[str, Any] = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs() UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def A ( self : List[Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
304
1
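The tests above exercise resize/center-crop/normalize plus do_reduce_labels, which remaps segmentation label 0 (background) to the ignore index 255 and shifts the real classes down by one — matching the 0-150 versus 0-255 bounds checked above. A numpy sketch of that remapping:

import numpy as np

def reduce_labels(segmentation_map: np.ndarray) -> np.ndarray:
    label = segmentation_map.astype(np.int64)
    label[label == 0] = 255  # background becomes the ignore index
    label = label - 1        # classes shift to 0..149
    label[label == 254] = 255
    return label

print(reduce_labels(np.array([0, 1, 150])))  # [255   0 149]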
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _UpperCamelCase : List[str] = logging.get_logger(__name__) _UpperCamelCase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} _UpperCamelCase : Tuple = { 'vocab_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json' ), }, } _UpperCamelCase : int = { 'yjernite/retribert-base-uncased': 512, } _UpperCamelCase : Tuple = { 'yjernite/retribert-base-uncased': {'do_lower_case': True}, } class snake_case__ ( UpperCamelCase): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = PRETRAINED_INIT_CONFIGURATION a_ = RetriBertTokenizer a_ = ["input_ids", "attention_mask"] def __init__( self : str , _A : Any=None , _A : List[Any]=None , _A : List[str]=True , _A : Any="[UNK]" , _A : Union[str, Any]="[SEP]" , _A : Dict="[PAD]" , _A : Any="[CLS]" , _A : Any="[MASK]" , _A : Union[str, Any]=True , _A : str=None , **_A : Tuple , ) -> str: super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) UpperCAmelCase_ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _A ) != do_lower_case or normalizer_state.get('''strip_accents''' , _A ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars ): UpperCAmelCase_ : Union[str, Any] = getattr(_A , normalizer_state.pop('''type''' ) ) UpperCAmelCase_ : Any = do_lower_case UpperCAmelCase_ : str = strip_accents UpperCAmelCase_ : List[str] = tokenize_chinese_chars UpperCAmelCase_ : List[str] = normalizer_class(**_A ) UpperCAmelCase_ : Union[str, Any] = do_lower_case def A ( self : str , _A : str , _A : Optional[Any]=None ) -> Any: UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A ( self : int , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A ( self : Any , _A : str , _A : Optional[str] = None ) -> Tuple[str]: UpperCAmelCase_ : int = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
304
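The tokenizer above wraps sequences as [CLS] A [SEP] (+ B [SEP]) and assigns segment id 0 to the first sequence and 1 to the second. A sketch of that layout; 101/102 are BERT-style special-token ids used purely for illustration:

def build_inputs(ids_a, ids_b=None, cls_id=101, sep_id=102):
    out = [cls_id] + ids_a + [sep_id]
    if ids_b:
        out += ids_b + [sep_id]
    return out

print(build_inputs([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]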
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class snake_case__ ( enum.Enum): a_ = 0 a_ = 1 a_ = 2 @add_end_docstrings(UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]: super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. UpperCAmelCase_ : Dict = None if self.model.config.prefix is not None: UpperCAmelCase_ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params ) UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params} UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params} def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = {} if prefix is not None: UpperCAmelCase_ : List[Any] = prefix if prefix: UpperCAmelCase_ : Tuple = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) UpperCAmelCase_ : Union[str, Any] = handle_long_generation preprocess_params.update(_A ) UpperCAmelCase_ : Optional[int] = generate_kwargs UpperCAmelCase_ : Tuple = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : List[Any] = ReturnType.TENSORS if return_type is not None: UpperCAmelCase_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase_ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict: return super().__call__(_A , **_A ) def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : str = prompt_text if handle_long_generation == "hole": UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens'''] else: UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:] return inputs def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]: UpperCAmelCase_ : Any = model_inputs['''input_ids'''] UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = 1 else: UpperCAmelCase_ : Optional[int] = input_ids.shape[0] UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) UpperCAmelCase_ : Any = generated_sequence.shape[0] if self.framework == "pt": UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0] UpperCAmelCase_ : int = model_outputs['''input_ids'''] UpperCAmelCase_ : str = model_outputs['''prompt_text'''] UpperCAmelCase_ : Any = generated_sequence.numpy().tolist() UpperCAmelCase_ : int = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text UpperCAmelCase_ : Any = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCAmelCase_ : List[str] = 0 else: UpperCAmelCase_ : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:] else: UpperCAmelCase_ : Dict = text[prompt_length:] UpperCAmelCase_ : List[str] = {'''generated_text''': all_text} records.append(_A ) return records
304
1
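The pipeline above handles prompt prefixes, long-prompt truncation (the "hole" strategy), and full-text versus new-text returns. End-to-end usage through the public API, with gpt2 as an example model:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
print(out[0]["generated_text"])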
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase : Tuple = logging.get_logger(__name__) _UpperCamelCase : Optional[int] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _UpperCamelCase : str = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } _UpperCamelCase : Union[str, Any] = { 'gpt-neox-20b': 2_048, } class snake_case__ ( UpperCamelCase): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : Any , _A : List[Any]=None , _A : Dict=None , _A : int=None , _A : Union[str, Any]="<|endoftext|>" , _A : Union[str, Any]="<|endoftext|>" , _A : Union[str, Any]="<|endoftext|>" , _A : Union[str, Any]=False , **_A : Union[str, Any] , ) -> Union[str, Any]: super().__init__( _A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , ) UpperCAmelCase_ : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space: UpperCAmelCase_ : List[Any] = getattr(_A , pre_tok_state.pop('''type''' ) ) UpperCAmelCase_ : Dict = add_prefix_space UpperCAmelCase_ : Dict = pre_tok_class(**_A ) UpperCAmelCase_ : Tuple = add_prefix_space def A ( self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: UpperCAmelCase_ : Any = self._tokenizer.model.save(_A , name=_A ) return tuple(_A ) def A ( self : Optional[Any] , _A : "Conversation" ) -> List[int]: UpperCAmelCase_ : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] ) if len(_A ) > self.model_max_length: UpperCAmelCase_ : List[Any] = input_ids[-self.model_max_length :] return input_ids
304
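The fast GPT-NeoX tokenizer above patches add_prefix_space into the saved pre-tokenizer state. A typical load-and-round-trip sketch (network access to the Hub is assumed):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
ids = tok("hello world").input_ids
print(ids, tok.decode(ids))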
'''simple docstring''' from __future__ import annotations import math def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int: if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) ) def __UpperCAmelCase ( ) -> None: UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3] UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 ) print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" ) if __name__ == "__main__": import doctest doctest.testmod() main()
304
1
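The file above is plain minimax over a complete binary tree of leaf scores. A sketch with explicit player alternation; for the score list above the optimal value works out to 65:

import math

def minimax(depth, index, is_max, scores, height):
    if depth == height:
        return scores[index]
    left = minimax(depth + 1, index * 2, not is_max, scores, height)
    right = minimax(depth + 1, index * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
print(minimax(0, 0, True, scores, math.log2(len(scores))))  # 65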
'''simple docstring''' def __UpperCAmelCase ( A : int , A : int ) -> int: return int(input_a == input_a == 0 ) def __UpperCAmelCase ( ) -> None: print('''Truth Table of NOR Gate:''' ) print('''| Input 1 | Input 2 | Output |''' ) print(F"| 0 | 0 | {nor_gate(0 , 0 )} |" ) print(F"| 0 | 1 | {nor_gate(0 , 1 )} |" ) print(F"| 1 | 0 | {nor_gate(1 , 0 )} |" ) print(F"| 1 | 1 | {nor_gate(1 , 1 )} |" ) if __name__ == "__main__": import doctest doctest.testmod() main()
304
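NOR is functionally complete, so the gate above can synthesize the other basic gates. A quick sketch:

def nor_gate(a: int, b: int) -> int:
    return int(a == b == 0)

def not_gate(a): return nor_gate(a, a)
def or_gate(a, b): return not_gate(nor_gate(a, b))
def and_gate(a, b): return nor_gate(not_gate(a), not_gate(b))

print(not_gate(0), or_gate(0, 1), and_gate(1, 1))  # 1 1 1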
'''simple docstring''' from __future__ import annotations def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list: UpperCAmelCase_ : Any = [] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCAmelCase_ : List[Any] = result + left + right return input_list def __UpperCAmelCase ( A : list ) -> list: if len(A ) <= 1: return input_list UpperCAmelCase_ : List[str] = list(A ) # iteration for two-way merging UpperCAmelCase_ : Tuple = 2 while p <= len(A ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(A ) , A ): UpperCAmelCase_ : Union[str, Any] = i UpperCAmelCase_ : int = i + p - 1 UpperCAmelCase_ : Any = (low + high + 1) // 2 UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A ) # final merge of last two parts if p * 2 >= len(A ): UpperCAmelCase_ : str = i UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip() if user_input == "": _UpperCamelCase : List[str] = [] else: _UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
304
1
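The file above merges runs of doubling width in place. A bottom-up sketch of the same idea that merges adjacent runs pairwise (not the file's exact index bookkeeping):

def merge(left, right):
    out = []
    while left and right:
        out.append((left if left[0] <= right[0] else right).pop(0))
    return out + left + right

def iter_merge_sort(items):
    runs = [[x] for x in items]
    while len(runs) > 1:
        # merge neighbors; an odd run at the end passes through unchanged
        runs = [merge(runs[i], runs[i + 1]) if i + 1 < len(runs) else runs[i]
                for i in range(0, len(runs), 2)]
    return runs[0] if runs else []

print(iter_merge_sort([5, 9, 8, 7, 1, 2, 7]))  # [1, 2, 5, 7, 7, 8, 9]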
'''simple docstring''' import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( A : Union[str, Any] , A : Dict , A : int , A : str="attention" ) -> int: UpperCAmelCase_ : Dict = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"] UpperCAmelCase_ : Tuple = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"] UpperCAmelCase_ : List[str] = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"] UpperCAmelCase_ : Optional[int] = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"] return k, o, q, v def __UpperCAmelCase ( A : Dict , A : Union[str, Any] , A : int , A : Optional[int]=False ) -> Any: if split_mlp_wi: UpperCAmelCase_ : Union[str, Any] = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"] UpperCAmelCase_ : Tuple = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"] UpperCAmelCase_ : Optional[Any] = (wi_a, wi_a) else: UpperCAmelCase_ : Dict = params[F"{prefix}/layers_{i}/mlp/wi/kernel"] UpperCAmelCase_ : int = params[F"{prefix}/layers_{i}/mlp/wo/kernel"] return wi, wo def __UpperCAmelCase ( A : List[str] , A : List[str] , A : Dict , A : Any ) -> Optional[int]: return params[F"{prefix}/layers_{i}/{layer_name}/scale"] def __UpperCAmelCase ( A : dict , *, A : int , A : bool ) -> Dict: UpperCAmelCase_ : Union[str, Any] = traverse_util.flatten_dict(variables['''target'''] ) UpperCAmelCase_ : Optional[int] = {'''/'''.join(A ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi UpperCAmelCase_ : int = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , A ) UpperCAmelCase_ : Optional[Any] = collections.OrderedDict() # Shared embeddings. UpperCAmelCase_ : Union[str, Any] = old['''token_embedder/embedding'''] # Encoder. for i in range(A ): # Block i, layer 0 (Self Attention). UpperCAmelCase_ : int = tax_layer_norm_lookup(A , A , '''encoder''' , '''pre_attention_layer_norm''' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = tax_attention_lookup(A , A , '''encoder''' , '''attention''' ) UpperCAmelCase_ : Optional[Any] = layer_norm UpperCAmelCase_ : str = k.T UpperCAmelCase_ : Any = o.T UpperCAmelCase_ : int = q.T UpperCAmelCase_ : str = v.T # Block i, layer 1 (MLP). UpperCAmelCase_ : Any = tax_layer_norm_lookup(A , A , '''encoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase_ , UpperCAmelCase_ : str = tax_mlp_lookup(A , A , '''encoder''' , A ) UpperCAmelCase_ : Dict = layer_norm if split_mlp_wi: UpperCAmelCase_ : Optional[Any] = wi[0].T UpperCAmelCase_ : Dict = wi[1].T else: UpperCAmelCase_ : str = wi.T UpperCAmelCase_ : Union[str, Any] = wo.T UpperCAmelCase_ : Optional[Any] = old[ '''encoder/relpos_bias/rel_embedding''' ].T UpperCAmelCase_ : Optional[int] = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(A ): # Block i, layer 0 (Self Attention). UpperCAmelCase_ : str = tax_layer_norm_lookup(A , A , '''decoder''' , '''pre_self_attention_layer_norm''' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = tax_attention_lookup(A , A , '''decoder''' , '''self_attention''' ) UpperCAmelCase_ : Tuple = layer_norm UpperCAmelCase_ : Optional[int] = k.T UpperCAmelCase_ : List[str] = o.T UpperCAmelCase_ : List[Any] = q.T UpperCAmelCase_ : List[str] = v.T # Block i, layer 1 (Cross Attention). 
UpperCAmelCase_ : str = tax_layer_norm_lookup(A , A , '''decoder''' , '''pre_cross_attention_layer_norm''' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = tax_attention_lookup(A , A , '''decoder''' , '''encoder_decoder_attention''' ) UpperCAmelCase_ : Optional[int] = layer_norm UpperCAmelCase_ : str = k.T UpperCAmelCase_ : Optional[int] = o.T UpperCAmelCase_ : List[Any] = q.T UpperCAmelCase_ : List[str] = v.T # Block i, layer 2 (MLP). UpperCAmelCase_ : List[Any] = tax_layer_norm_lookup(A , A , '''decoder''' , '''pre_mlp_layer_norm''' ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = tax_mlp_lookup(A , A , '''decoder''' , A ) UpperCAmelCase_ : List[Any] = layer_norm if split_mlp_wi: UpperCAmelCase_ : Union[str, Any] = wi[0].T UpperCAmelCase_ : Any = wi[1].T else: UpperCAmelCase_ : List[str] = wi.T UpperCAmelCase_ : Dict = wo.T UpperCAmelCase_ : Optional[Any] = old['''decoder/decoder_norm/scale'''] UpperCAmelCase_ : List[str] = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: UpperCAmelCase_ : Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def __UpperCAmelCase ( A : Any , A : bool ) -> Any: UpperCAmelCase_ : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: UpperCAmelCase_ : str = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: UpperCAmelCase_ : List[Any] = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) UpperCAmelCase_ : Dict = state_dict['''shared.weight'''] return state_dict def __UpperCAmelCase ( A : Union[str, Any] , A : Dict , A : str , A : str ) -> Union[str, Any]: UpperCAmelCase_ : str = checkpoints.load_tax_checkpoint(A ) UpperCAmelCase_ : int = convert_tax_to_pytorch(A , num_layers=config.num_layers , is_encoder_only=A ) UpperCAmelCase_ : List[Any] = make_state_dict(A , A ) model.load_state_dict(A , strict=A ) def __UpperCAmelCase ( A : Any , A : Optional[Any] , A : Tuple , A : bool = False ) -> Any: UpperCAmelCase_ : int = TaConfig.from_json_file(A ) print(F"Building PyTorch model from configuration: {config}" ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: UpperCAmelCase_ : int = TaEncoderModel(A ) else: UpperCAmelCase_ : List[str] = TaForConditionalGeneration(A ) # Load weights from tf checkpoint load_tax_weights_in_ta(A , A , A , A ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(A ) # Verify that we can load the checkpoint. model.from_pretrained(A ) print('''Done''' ) if __name__ == "__main__": _UpperCamelCase : List[str] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) _UpperCamelCase : Optional[int] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
304
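The converter above transposes every kernel because Flax/T5X stores dense kernels as (in_features, out_features) while torch.nn.Linear keeps (out_features, in_features). A minimal sketch of that transpose; the shapes are illustrative:

import numpy as np
import torch

flax_kernel = np.zeros((512, 2048), dtype=np.float32)  # (in, out) in T5X
linear = torch.nn.Linear(512, 2048, bias=False)        # weight is (out, in)
linear.weight.data = torch.from_numpy(flax_kernel.T.copy())
assert linear.weight.shape == (2048, 512)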
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case__ : a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 a_ = 42 a_ = 42 a_ = 42 a_ = 42 def A ( self : Tuple ) -> Optional[int]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A ( self : List[Any] ) -> Union[str, Any]: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A ( self : Optional[int] ) -> torch.Tensor: UpperCAmelCase_ : Dict = torch.arange(self.height * self.width ) UpperCAmelCase_ : int = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) ) UpperCAmelCase_ : Any = self.get_image_coords() UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A ) UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor: UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 ) UpperCAmelCase_ : Union[str, Any] = self.resolution() UpperCAmelCase_ : int = self.fov() UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 ) UpperCAmelCase_ : List[Any] = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A ) UpperCAmelCase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera: UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : Optional[int] = -z * 4 UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(A , A ) origins.append(A ) xs.append(A ) ys.append(A ) zs.append(A ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
304
1
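get_camera_rays above maps pixel coordinates to unit ray directions: normalize to [-1, 1], scale by tan(fov/2), and mix the camera's x/y/z basis vectors. A single-pixel numpy sketch of the same math:

import numpy as np

def pixel_to_ray(px, py, width, height, x_fov, y_fov, x, y, z):
    fx = (px / (width - 1)) * 2 - 1  # normalized device coordinates
    fy = (py / (height - 1)) * 2 - 1
    d = z + x * fx * np.tan(x_fov / 2) + y * fy * np.tan(y_fov / 2)
    return d / np.linalg.norm(d)

# axis-aligned toy camera looking down +z
print(pixel_to_ray(32, 32, 64, 64, 0.7, 0.7,
                   np.array([1.0, 0, 0]), np.array([0, 1.0, 0]), np.array([0, 0, 1.0])))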
'''simple docstring''' import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class snake_case__ ( UpperCamelCase): # to overwrite at feature extractactor specific tests a_ = None a_ = None @property def A ( self : List[Any] ) -> Optional[Any]: return self.feat_extract_tester.prepare_feat_extract_dict() def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_A , '''feature_size''' ) ) self.assertTrue(hasattr(_A , '''sampling_rate''' ) ) self.assertTrue(hasattr(_A , '''padding_value''' ) ) def A ( self : int ) -> int: UpperCAmelCase_ : Dict = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : Dict = feat_extract.model_input_names[0] UpperCAmelCase_ : str = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) ) UpperCAmelCase_ : List[str] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) UpperCAmelCase_ : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) UpperCAmelCase_ : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase_ : List[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def A ( self : int ) -> Any: UpperCAmelCase_ : Dict = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) UpperCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : Dict = feat_extract.model_input_names[0] UpperCAmelCase_ : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) UpperCAmelCase_ : List[str] = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase_ : List[Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def A ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : str = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : int = feat_extract.model_input_names[0] UpperCAmelCase_ : int = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) UpperCAmelCase_ : Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: UpperCAmelCase_ : Union[str, Any] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def A ( self : Dict , _A : Optional[Any]=False ) -> Dict: def _inputs_have_equal_length(_A : int ): UpperCAmelCase_ : List[Any] = len(input[0] ) for input_slice in input[1:]: if len(_A ) != length: return False return True def _inputs_are_equal(_A : int , _A : Any ): if len(_A ) != len(_A ): return False for input_slice_a, input_slice_a in zip(_A , _A ): if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1e-3 ): return False return True UpperCAmelCase_ : str = 
self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A ) UpperCAmelCase_ : Dict = feat_extract.model_input_names[0] UpperCAmelCase_ : Optional[Any] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase_ : str = self.feat_extract_tester.seq_length_diff UpperCAmelCase_ : List[str] = self.feat_extract_tester.max_seq_length + pad_diff UpperCAmelCase_ : Tuple = self.feat_extract_tester.min_seq_length UpperCAmelCase_ : Optional[int] = self.feat_extract_tester.batch_size UpperCAmelCase_ : Dict = self.feat_extract_tester.feature_size # test padding for List[int] + numpy UpperCAmelCase_ : Tuple = feat_extract.pad(_A , padding=_A ) UpperCAmelCase_ : int = input_a[input_name] UpperCAmelCase_ : Any = feat_extract.pad(_A , padding='''longest''' ) UpperCAmelCase_ : Optional[int] = input_a[input_name] UpperCAmelCase_ : Tuple = feat_extract.pad(_A , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) UpperCAmelCase_ : str = input_a[input_name] UpperCAmelCase_ : Dict = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' ) UpperCAmelCase_ : str = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_A ): feat_extract.pad(_A , padding='''max_length''' )[input_name] UpperCAmelCase_ : List[Any] = feat_extract.pad( _A , padding='''max_length''' , max_length=_A , return_tensors='''np''' ) UpperCAmelCase_ : List[str] = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy UpperCAmelCase_ : Any = feat_extract.pad(_A , pad_to_multiple_of=10 ) UpperCAmelCase_ : Dict = input_a[input_name] UpperCAmelCase_ : List[Any] = feat_extract.pad(_A , padding='''longest''' , pad_to_multiple_of=10 ) UpperCAmelCase_ : Tuple = input_a[input_name] UpperCAmelCase_ : Optional[Any] = feat_extract.pad( _A , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_A ) UpperCAmelCase_ : int = input_a[input_name] UpperCAmelCase_ : Optional[int] = feat_extract.pad( _A , padding='''max_length''' , pad_to_multiple_of=10 , max_length=_A , return_tensors='''np''' , ) UpperCAmelCase_ : List[Any] = input_a[input_name] self.assertTrue(all(len(_A ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) UpperCAmelCase_ : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_A ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct UpperCAmelCase_ : List[str] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] 
)[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def A ( self : Optional[Any] , _A : Union[str, Any]=False ) -> Union[str, Any]: def _inputs_have_equal_length(_A : Union[str, Any] ): UpperCAmelCase_ : Dict = len(input[0] ) for input_slice in input[1:]: if len(_A ) != length: return False return True def _inputs_are_equal(_A : List[Any] , _A : Union[str, Any] ): if len(_A ) != len(_A ): return False for input_slice_a, input_slice_a in zip(_A , _A ): if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1e-3 ): return False return True UpperCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A ) UpperCAmelCase_ : Any = feat_extract.model_input_names[0] UpperCAmelCase_ : int = BatchFeature({input_name: speech_inputs} ) # truncate to smallest UpperCAmelCase_ : List[Any] = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=_A ) UpperCAmelCase_ : str = input_a[input_name] UpperCAmelCase_ : Any = feat_extract.pad(_A , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) UpperCAmelCase_ : Dict = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertFalse(_inputs_have_equal_length(_A ) ) # truncate to smallest with np UpperCAmelCase_ : Tuple = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=_A , ) UpperCAmelCase_ : str = input_a[input_name] UpperCAmelCase_ : List[str] = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) UpperCAmelCase_ : str = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_A ) ) # truncate to middle UpperCAmelCase_ : str = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_A , return_tensors='''np''' , ) UpperCAmelCase_ : List[str] = input_a[input_name] UpperCAmelCase_ : Any = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=_A ) UpperCAmelCase_ : Union[str, Any] = input_a[input_name] UpperCAmelCase_ : Any = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) UpperCAmelCase_ : Union[str, Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_A ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # 
padding has to be max_length when setting `truncation=True` with self.assertRaises(_A ): feat_extract.pad(_A , truncation=_A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_A ): feat_extract.pad(_A , padding='''longest''' , truncation=_A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_A ): feat_extract.pad(_A , padding='''longest''' , truncation=_A )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_A ): feat_extract.pad(_A , padding='''max_length''' , truncation=_A )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy UpperCAmelCase_ : Tuple = 12 UpperCAmelCase_ : List[str] = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , truncation=_A , ) UpperCAmelCase_ : Optional[Any] = input_a[input_name] UpperCAmelCase_ : Dict = feat_extract.pad( _A , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , ) UpperCAmelCase_ : Optional[Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of UpperCAmelCase_ : Any = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: UpperCAmelCase_ : str = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertFalse(_inputs_have_equal_length(_A ) ) def A ( self : Optional[int] ) -> int: self._check_padding(numpify=_A ) def A ( self : Union[str, Any] ) -> Optional[Any]: self._check_padding(numpify=_A ) def A ( self : List[Any] ) -> Union[str, Any]: self._check_truncation(numpify=_A ) def A ( self : Optional[int] ) -> List[str]: self._check_truncation(numpify=_A ) @require_torch def A ( self : List[str] ) -> int: UpperCAmelCase_ : str = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : int = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase_ : Any = feat_extract.model_input_names[0] UpperCAmelCase_ : str = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase_ : Optional[Any] = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name] UpperCAmelCase_ : str = feat_extract.pad(_A , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def A ( self : List[Any] ) -> Tuple: UpperCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict ) UpperCAmelCase_ : List[Any] = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase_ : int = feat_extract.model_input_names[0] UpperCAmelCase_ : List[str] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase_ : str = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' )[input_name] UpperCAmelCase_ : Dict = feat_extract.pad(_A , padding='''longest''' , return_tensors='''tf''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def A ( self : int ) -> str: UpperCAmelCase_ : List[Any] = self.feat_extract_dict UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : Optional[Any] = self.feature_extraction_class(**_A ) UpperCAmelCase_ : str = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase_ : int = [len(_A ) for x in 
speech_inputs] UpperCAmelCase_ : Dict = feat_extract.model_input_names[0] UpperCAmelCase_ : Optional[Any] = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase_ : Optional[Any] = feat_extract.pad(_A , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _A ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A ) def A ( self : str ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.feat_extract_dict UpperCAmelCase_ : Any = True UpperCAmelCase_ : str = self.feature_extraction_class(**_A ) UpperCAmelCase_ : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = [len(_A ) for x in speech_inputs] UpperCAmelCase_ : str = feat_extract.model_input_names[0] UpperCAmelCase_ : Any = BatchFeature({input_name: speech_inputs} ) UpperCAmelCase_ : int = min(_A ) UpperCAmelCase_ : Any = feat_extract.pad( _A , padding='''max_length''' , max_length=_A , truncation=_A , return_tensors='''np''' ) self.assertIn('''attention_mask''' , _A ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
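# The ``pad_to_multiple_of`` arithmetic asserted in _check_padding above, pulled
# out as a standalone sketch (illustrative helper, not part of the test mixin):
# lengths are rounded up to the next multiple m, i.e. n stays n when n % m == 0
# and becomes ((n // m) + 1) * m otherwise.
def _round_up_to_multiple(n: int, m: int) -> int:
    return n if n % m == 0 else ((n // m) + 1) * m


assert _round_up_to_multiple(20, 10) == 20
assert _round_up_to_multiple(23, 10) == 30
assert _round_up_to_multiple(19, 12) == 24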
'''simple docstring'''
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character as c = (ord(char) + k) * k with a random key k."""
        plain = [ord(char) for char in text]
        cipher = []
        key = []
        for value in plain:
            k = random.randint(1, 300)
            cipher.append((value + k) * k)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert ``encrypt``: p = (c - k**2) / k for each cipher/key pair."""
        plain = []
        for i in range(len(key)):
            plain.append(chr(int((cipher[i] - key[i] ** 2) / key[i])))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
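# Round-trip sanity check for the cipher above (an illustrative sketch, not
# part of the original module): decrypt inverts encrypt exactly, because
# ((p + k) * k - k**2) / k == p for any key k != 0.
def _onepad_roundtrip(sample: str = "one-time pad") -> None:
    cipher, key = Onepad().encrypt(sample)
    assert Onepad().decrypt(cipher, key) == sample


_onepad_roundtrip()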
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json _UpperCamelCase : Any = 'sshleifer/mar_enro_6_3_student' class snake_case__ ( UpperCamelCase): def A ( self : Optional[Any] ) -> str: super().setUp() UpperCAmelCase_ : List[Any] = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_A , ) UpperCAmelCase_ : List[Any] = F"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def A ( self : Union[str, Any] ) -> str: MarianMTModel.from_pretrained(_A ) @slow @require_torch_gpu def A ( self : int ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script UpperCAmelCase_ : List[str] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() UpperCAmelCase_ : Optional[int] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase_ : Dict = bash_script.replace(_A , str(_A ) ) UpperCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCAmelCase_ : str = F"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split() # XXX: args.gpus > 1 : handle multi_gpu in the future UpperCAmelCase_ : Any = ['''finetune.py'''] + bash_script.split() + args with patch.object(_A , '''argv''' , _A ): UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() UpperCAmelCase_ : Union[str, Any] = pl.Trainer.add_argparse_args(_A ) UpperCAmelCase_ : str = SummarizationModule.add_model_specific_args(_A , os.getcwd() ) UpperCAmelCase_ : Tuple = parser.parse_args() UpperCAmelCase_ : List[str] = main(_A ) # Check metrics UpperCAmelCase_ : Any = load_json(model.metrics_save_path ) UpperCAmelCase_ : Any = metrics['''val'''][0] UpperCAmelCase_ : List[str] = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _A ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase_ : List[str] = os.listdir(_A ) UpperCAmelCase_ : Union[str, Any] = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase_ : Union[str, Any] = os.path.join(args.output_dir , _A ) UpperCAmelCase_ : int = torch.load(_A , map_location='''cpu''' ) UpperCAmelCase_ : Optional[Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase_ : Optional[Any] = {os.path.basename(_A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class snake_case__ ( UpperCamelCase): @timeout_decorator.timeout(6_00 ) @slow @require_torch_gpu def A ( self : List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = F"{self.test_file_dir_str}/test_data/wmt_en_ro" UpperCAmelCase_ : Tuple = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 1_28, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script UpperCAmelCase_ : Tuple = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) UpperCAmelCase_ : List[str] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) UpperCAmelCase_ : List[str] = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase_ : Optional[Any] = bash_script.replace(_A , str(_A ) ) UpperCAmelCase_ : Optional[int] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : str = bash_script.replace('''--fp16''' , '''''' ) UpperCAmelCase_ : Tuple = 6 UpperCAmelCase_ : Dict = ( ['''distillation.py'''] + bash_script.split() + [ F"--output_dir={output_dir}", '''--gpus=1''', '''--learning_rate=1e-3''', F"--num_train_epochs={epochs}", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(_A , '''argv''' , _A ): UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() UpperCAmelCase_ : List[str] = pl.Trainer.add_argparse_args(_A ) UpperCAmelCase_ : int = SummarizationDistiller.add_model_specific_args(_A , os.getcwd() ) UpperCAmelCase_ : List[str] = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCAmelCase_ : List[str] = distill_main(_A ) # Check metrics UpperCAmelCase_ : Any = load_json(model.metrics_save_path ) UpperCAmelCase_ : str = metrics['''val'''][0] UpperCAmelCase_ : List[str] = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[F"val_avg_{model.val_metric}"] , _A ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase_ : str = os.listdir(_A ) UpperCAmelCase_ : Any = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase_ : Dict = os.path.join(args.output_dir , _A ) UpperCAmelCase_ : List[str] = torch.load(_A , map_location='''cpu''' ) UpperCAmelCase_ : List[str] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase_ : List[str] = {os.path.basename(_A ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
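# The placeholder-substitution pattern both tests above rely on, reduced to a
# toy example (hypothetical script text; the real scripts are
# train_mbart_cc25_enro.sh and distil_marian_no_teacher.sh):
def _demo_env_var_substitution():
    env_vars_to_replace = {"$MAX_LEN": 64, "$BS": 16}
    bash_script = "finetune.py --max_target_length $MAX_LEN --train_batch_size $BS"
    for k, v in env_vars_to_replace.items():
        bash_script = bash_script.replace(k, str(v))
    assert bash_script == "finetune.py --max_target_length 64 --train_batch_size 16"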
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = ReformerTokenizer a_ = ReformerTokenizerFast a_ = True a_ = False a_ = True def A ( self : Optional[Any] ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[Any] = '''<s>''' UpperCAmelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 10_00 ) def A ( self : Optional[int] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def A ( self : Optional[Any] ) -> List[Any]: if not self.test_rust_tokenizer: return UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(_A ) UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def A ( self : Tuple , _A : Dict=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase_ : Optional[int] = '''This is a simple input''' UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase_ : Dict = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , 
) def A ( self : Union[str, Any] ) -> int: pass def A ( self : int ) -> Any: UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A ) UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : List[str] ) -> Optional[int]: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : Tuple = '''Hello World!''' UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def A ( self : List[Any] ) -> str: UpperCAmelCase_ : Tuple = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase_ : int = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @require_torch @slow def A ( self : List[str] ) -> Optional[int]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ : List[Any] = ''' '''.join(_A ) UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' ) UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) UpperCAmelCase_ : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape UpperCAmelCase_ : Optional[int] = ReformerModel(_A ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_A ) model(**_A ) @slow def A ( self : int ) -> Optional[Any]: # fmt: off UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 UpperCAmelCase_ : Optional[Any] = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
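# The <unk> fallback asserted in the integration checks above, sketched with a
# toy id table (ids 8 and 21 are taken from the expected encodings above; the
# real mapping comes from fixtures/test_sentencepiece.model):
def _demo_unk_fallback():
    toy_vocab = {"<unk>": 0, "▁I": 8, "▁was": 21}
    ids = [toy_vocab.get(token, toy_vocab["<unk>"]) for token in ["▁I", "▁was", "é"]]
    assert ids == [8, 21, 0]  # tokens missing from the vocab map to <unk>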
'''simple docstring''' import argparse from collections import defaultdict import yaml _UpperCamelCase : Tuple = 'docs/source/en/_toctree.yml' def __UpperCAmelCase ( A : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Dict = defaultdict(A ) UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Dict = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(A ) UpperCAmelCase_ : List[Any] = new_doc_list UpperCAmelCase_ : List[str] = [key for key, value in counts.items() if value > 1] UpperCAmelCase_ : Optional[Any] = [] for duplicate_key in duplicates: UpperCAmelCase_ : str = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(A ) > 1: raise ValueError( F"{duplicate_key} is present several times in the documentation table of content at " '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) UpperCAmelCase_ : int = sorted(A , key=lambda A : s["title"].lower() ) # "overview" gets special treatment and is always first if len(A ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(A ) # Sort return overview_doc def __UpperCAmelCase ( A : List[Any]=False ) -> Union[str, Any]: with open(A , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Union[str, Any] = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase_ : List[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase_ : Optional[int] = content[api_idx]['''sections'''] # Then to the model doc UpperCAmelCase_ : int = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 UpperCAmelCase_ : int = api_doc[scheduler_idx]['''sections'''] UpperCAmelCase_ : int = clean_doc_toc(A ) UpperCAmelCase_ : str = False if new_scheduler_doc != scheduler_doc: UpperCAmelCase_ : List[str] = True if overwrite: UpperCAmelCase_ : Tuple = new_scheduler_doc if diff: if overwrite: UpperCAmelCase_ : Tuple = api_doc with open(A , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(A , allow_unicode=A ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def __UpperCAmelCase ( A : Any=False ) -> Tuple: with open(A , encoding='''utf-8''' ) as f: UpperCAmelCase_ : Optional[int] = yaml.safe_load(f.read() ) # Get to the API doc UpperCAmelCase_ : Optional[Any] = 0 while content[api_idx]["title"] != "API": api_idx += 1 UpperCAmelCase_ : Optional[int] = content[api_idx]['''sections'''] # Then to the model doc UpperCAmelCase_ : str = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 UpperCAmelCase_ : List[str] = False UpperCAmelCase_ : List[str] = api_doc[pipeline_idx]['''sections'''] UpperCAmelCase_ : Optional[int] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: UpperCAmelCase_ : Any = pipeline_doc['''section'''] UpperCAmelCase_ : Optional[Any] = clean_doc_toc(A ) if overwrite: UpperCAmelCase_ : Dict = new_sub_pipeline_doc new_pipeline_docs.append(A ) # sort overall pipeline doc UpperCAmelCase_ : Any = clean_doc_toc(A ) if new_pipeline_docs 
!= pipeline_docs: UpperCAmelCase_ : Tuple = True if overwrite: UpperCAmelCase_ : Union[str, Any] = new_pipeline_docs if diff: if overwrite: UpperCAmelCase_ : Union[str, Any] = api_doc with open(A , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(A , allow_unicode=A ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": _UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _UpperCamelCase : Tuple = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
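# clean_doc_toc (the helper invoked inside both check functions above) on a toy
# table of contents — an illustrative sketch; the real input is parsed from
# docs/source/en/_toctree.yml:
def _demo_clean_doc_toc():
    toy = [
        {"local": "ddim", "title": "DDIM"},
        {"local": "overview", "title": "Overview"},
        {"local": "ddim", "title": "DDIM"},  # duplicate key with an identical title
    ]
    cleaned = clean_doc_toc(toy)
    # "overview" is pulled to the front, the duplicate collapses to one entry,
    # and the remainder is sorted by lower-cased title:
    assert cleaned == [
        {"local": "overview", "title": "Overview"},
        {"local": "ddim", "title": "DDIM"},
    ]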
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Invert ``encode``: map positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
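# Quick checks for the two helpers above (illustrative):
assert encode("abc") == [1, 2, 3]
assert decode([8, 9]) == "hi"
assert decode(encode("zebra")) == "zebra"  # encode and decode are inverses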
'''simple docstring'''
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted slices input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
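# Quick checks (illustrative): with p = 2 the first pass sorts adjacent pairs,
# and the final merge combines the two sorted halves.
assert iter_merge_sort([4, 1, 3, 2]) == [1, 2, 3, 4]
assert iter_merge_sort([]) == []
assert iter_merge_sort([7]) == [7]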
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
'''simple docstring'''
import argparse

JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in custom.js."""
    with open(JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
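# An in-memory walkthrough of the rewrite performed by update_custom_js above
# (toy JS content; the real file is the JS_FILE constant):
def _demo_rewrite(version: str = "4.30.0") -> list[str]:
    lines = [
        'const stableVersion = "v4.29.0"\n',
        "const versionMapping = {\n",
        '    "v4.29.0": "v4.29.0",\n',
        "}\n",
    ]
    index = 0
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    while not lines[index].startswith("}"):
        index += 1
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    return lines


assert _demo_rewrite()[0] == 'const stableVersion = "v4.30.0"\n'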
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
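# Property check (illustrative): consecutive Gray codes differ in exactly one
# bit, so XOR-ing neighbours always yields a power of two.
def _check_gray_property():
    codes = gray_code(3)
    assert codes == [0, 1, 3, 2, 6, 7, 5, 4]
    for a, b in zip(codes, codes[1:]):
        x = a ^ b
        assert x != 0 and x & (x - 1) == 0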
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger _UpperCamelCase : Any = '<<<<<<< This should probably be modified because it mentions: ' _UpperCamelCase : Optional[Any] = '=======\n>>>>>>>\n' _UpperCamelCase : List[Any] = [ 'TextEncoderConfig', 'ByteTextEncoder', 'SubwordTextEncoder', 'encoder_config', 'maybe_build_from_corpus', 'manual_dir', ] _UpperCamelCase : str = [ # (pattern, replacement) # Order is important here for some replacements (R'tfds\.core', R'datasets'), (R'tf\.io\.gfile\.GFile', R'open'), (R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'), (R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'), (R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'), (R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('), (R'tfds\.features\.FeaturesDict\(', R'dict('), (R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'), (R'tfds\.', R'datasets.'), (R'dl_manager\.manual_dir', R'self.config.data_dir'), (R'self\.builder_config', R'self.config'), ] def __UpperCAmelCase ( A : Namespace ) -> Dict: return ConvertCommand(args.tfds_path , args.datasets_directory ) class snake_case__ ( UpperCamelCase): @staticmethod def A ( _A : ArgumentParser ) -> Optional[Any]: UpperCAmelCase_ : int = parser.add_parser( '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , ) train_parser.add_argument( '''--tfds_path''' , type=_A , required=_A , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , ) train_parser.add_argument( '''--datasets_directory''' , type=_A , required=_A , help='''Path to the HuggingFace Datasets folder.''' ) train_parser.set_defaults(func=_A ) def __init__( self : Dict , _A : str , _A : str , *_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : int = get_logger('''datasets-cli/converting''' ) UpperCAmelCase_ : Tuple = tfds_path UpperCAmelCase_ : List[str] = datasets_directory def A ( self : str ) -> Optional[Any]: if os.path.isdir(self._tfds_path ): UpperCAmelCase_ : Optional[int] = os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): UpperCAmelCase_ : List[str] = os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. 
Please check path.''' ) UpperCAmelCase_ : Dict = os.path.abspath(self._datasets_directory ) self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" ) UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : List[str] = {} if os.path.isdir(self._tfds_path ): UpperCAmelCase_ : Tuple = os.listdir(_A ) else: UpperCAmelCase_ : List[str] = [os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(F"Looking at file {f_name}" ) UpperCAmelCase_ : Tuple = os.path.join(_A , _A ) UpperCAmelCase_ : Union[str, Any] = os.path.join(_A , _A ) if not os.path.isfile(_A ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(_A , encoding='''utf-8''' ) as f: UpperCAmelCase_ : List[str] = f.readlines() UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : Dict = False UpperCAmelCase_ : Union[str, Any] = [] for line in lines: UpperCAmelCase_ : Any = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: UpperCAmelCase_ : Optional[Any] = '''import datasets\n''' elif "import tensorflow" in out_line: # order is important here UpperCAmelCase_ : Optional[int] = '''''' continue elif "from absl import logging" in out_line: UpperCAmelCase_ : Tuple = '''from datasets import logging\n''' elif "getLogger" in out_line: UpperCAmelCase_ : int = out_line.replace('''getLogger''' , '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): UpperCAmelCase_ : Dict = True UpperCAmelCase_ : int = list(filter(lambda _A : e in out_line , _A ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_A ) + '''\n''' ) out_lines.append(_A ) out_lines.append(_A ) continue else: for pattern, replacement in TO_CONVERT: UpperCAmelCase_ : Optional[int] = re.sub(_A , _A , _A ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: UpperCAmelCase_ : Union[str, Any] = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _A ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) UpperCAmelCase_ : int = '''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"Error converting {out_line.strip()}" ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: UpperCAmelCase_ : Dict = True out_lines.append(_A ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset UpperCAmelCase_ : Any = f_name.replace('''.py''' , '''''' ) UpperCAmelCase_ : str = os.path.join(_A , _A ) UpperCAmelCase_ : int = os.path.join(_A , _A ) os.makedirs(_A , exist_ok=_A ) self._logger.info(F"Adding directory {output_dir}" ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(_A ) if needs_manual_update: with_manual_update.append(_A ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.writelines(_A ) self._logger.info(F"Converted in {output_file}" ) for utils_file in utils_files: try: UpperCAmelCase_ : Optional[int] = os.path.basename(_A ) UpperCAmelCase_ : int = imports_to_builder_map[f_name.replace('''.py''' , '''''' )] self._logger.info(F"Moving {dest_folder} to {utils_file}" ) shutil.copy(_A , _A ) except KeyError: self._logger.error(F"Cannot find destination folder for {utils_file}. Please copy manually." ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
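# The regex table driving the conversion above, applied to one toy line
# (only two of the TO_CONVERT patterns reproduced here as a sketch):
def _demo_to_convert():
    to_convert_sample = [
        (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
        (r"tfds\.", r"datasets."),
    ]
    line = "tfds.features.Text()"
    for pattern, replacement in to_convert_sample:
        line = re.sub(pattern, replacement, line)
    assert line == "datasets.Value('string')"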
'''simple docstring'''
import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
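# Minimal usage sketch for the configuration above (values illustrative and
# equal to the defaults):
def _demo_masked_bert_config():
    config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
    assert config.model_type == "masked_bert"
    assert config.hidden_size == 768 and config.num_hidden_layers == 12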
'''simple docstring'''
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull a monotone "strand" out of ``arr`` and merge it into ``solution``."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
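# Usage note (illustrative): strand_sort pops from ``arr`` in place, so pass a
# copy when the caller still needs the original list.
def _demo_copy_preserving_sort():
    data = [3, 1, 2]
    assert strand_sort(list(data)) == [1, 2, 3]
    assert data == [3, 1, 2]  # untouched, because a copy was sorted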
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = StableDiffusionDiffEditPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} a_ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([]) def A ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : str , _A : List[str] , _A : Any=0 ) -> str: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Any = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : str = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, 
'''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Dict = torch.manual_seed(_A ) else: UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[Any] = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any: UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[int] = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def A ( self : List[str] ) -> Optional[Any]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Any = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase_ : str = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0] UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(_A , 1e-4 ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : Optional[Any] = '''cpu''' UpperCAmelCase_ : Any = self.get_dummy_components() UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A ) UpperCAmelCase_ : int = pipe.generate_mask(**_A ) UpperCAmelCase_ : Tuple = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase_ : List[Any] = np.array([0] * 9 ) UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = '''cpu''' UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : int = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) def A ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : Any = '''cpu''' UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A ) UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A ) UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase): def A ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) ) UpperCAmelCase_ : Any = raw_image def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Tuple = '''a bowl of pears''' UpperCAmelCase_ : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[str] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents UpperCAmelCase_ : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Dict = '''a bowl of pears''' UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents UpperCAmelCase_ : Dict = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
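# The three-stage DiffEdit flow exercised by the slow tests above, condensed
# into one helper (a sketch using the same arguments the tests pass; requires a
# loaded StableDiffusionDiffEditPipeline and a PIL image):
def diffedit_outline(pipe, image, source_prompt, target_prompt, generator=None):
    mask = pipe.generate_mask(
        image=image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
    )
    latents = pipe.invert(
        prompt=source_prompt, image=image, inpaint_strength=0.7, generator=generator
    ).latents
    return pipe(
        prompt=target_prompt,
        mask_image=mask,
        image_latents=latents,
        generator=generator,
        negative_prompt=source_prompt,
        inpaint_strength=0.7,
        output_type="numpy",
    ).images[0]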
'''simple docstring'''

import unittest

import numpy as np
import torch

from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device

from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class snake_case__ ( UpperCamelCase , unittest.TestCase):
    a_ = DDIMPipeline
    a_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    a_ = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    a_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    a_ = False

    def A ( self : List[Any] ) -> Dict:
        torch.manual_seed(0 )
        UpperCAmelCase_ : List[Any] = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        UpperCAmelCase_ : List[Any] = DDIMScheduler()
        UpperCAmelCase_ : int = {'''unet''': unet, '''scheduler''': scheduler}
        return components

    def A ( self : List[Any] , _A : str , _A : List[Any]=0 ) -> Optional[int]:
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase_ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase_ : Optional[int] = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A ( self : int ) -> int:
        UpperCAmelCase_ : Optional[int] = '''cpu'''
        UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
        UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : int = self.get_dummy_inputs(_A )
        UpperCAmelCase_ : Tuple = pipe(**_A ).images
        UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        UpperCAmelCase_ : Optional[Any] = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        UpperCAmelCase_ : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(_A , 1e-3 )

    def A ( self : List[Any] ) -> List[str]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def A ( self : Any ) -> Optional[int]:
        super().test_save_load_local(expected_max_difference=3e-3 )

    def A ( self : Optional[Any] ) -> Dict:
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def A ( self : Dict ) -> Dict:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase):
    def A ( self : Union[str, Any] ) -> Dict:
        UpperCAmelCase_ : Any = '''google/ddpm-cifar10-32'''
        UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(_A )
        UpperCAmelCase_ : int = DDIMScheduler()
        UpperCAmelCase_ : Optional[int] = DDIMPipeline(unet=_A , scheduler=_A )
        ddim.to(_A )
        ddim.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Dict = torch.manual_seed(0 )
        UpperCAmelCase_ : Any = ddim(generator=_A , eta=0.0 , output_type='''numpy''' ).images
        UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_ : List[str] = np.array([0.1_723, 0.1_617, 0.1_600, 0.1_626, 0.1_497, 0.1_513, 0.1_505, 0.1_442, 0.1_453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def A ( self : Optional[int] ) -> Optional[int]:
        UpperCAmelCase_ : Optional[Any] = '''google/ddpm-ema-bedroom-256'''
        UpperCAmelCase_ : Any = UNetaDModel.from_pretrained(_A )
        UpperCAmelCase_ : Optional[Any] = DDIMScheduler.from_pretrained(_A )
        UpperCAmelCase_ : int = DDIMPipeline(unet=_A , scheduler=_A )
        ddpm.to(_A )
        ddpm.set_progress_bar_config(disable=_A )
        UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase_ : Union[str, Any] = ddpm(generator=_A , output_type='''numpy''' ).images
        UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        UpperCAmelCase_ : int = np.array([0.0_060, 0.0_201, 0.0_344, 0.0_024, 0.0_018, 0.0_002, 0.0_022, 0.0_000, 0.0_069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
304
'''simple docstring'''

import inspect
import unittest
from math import floor

from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import CvtForImageClassification, CvtModel
    from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class snake_case__ ( UpperCamelCase):
    def A ( self : List[str] ) -> List[Any]:
        UpperCAmelCase_ : int = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(_A , '''num_heads''' ) )


class snake_case__ :
    def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]:
        UpperCAmelCase_ : int = parent
        UpperCAmelCase_ : List[Any] = batch_size
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : Tuple = patch_sizes
        UpperCAmelCase_ : int = patch_stride
        UpperCAmelCase_ : Any = patch_padding
        UpperCAmelCase_ : List[Any] = is_training
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : Union[str, Any] = num_labels
        UpperCAmelCase_ : List[str] = num_channels
        UpperCAmelCase_ : int = embed_dim
        UpperCAmelCase_ : Optional[int] = num_heads
        UpperCAmelCase_ : Tuple = stride_kv
        UpperCAmelCase_ : Optional[Any] = depth
        UpperCAmelCase_ : Dict = cls_token
        UpperCAmelCase_ : Dict = attention_drop_rate
        UpperCAmelCase_ : Any = initializer_range
        UpperCAmelCase_ : List[str] = layer_norm_eps

    def A ( self : int ) -> List[str]:
        UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase_ : List[str] = self.get_config()
        return config, pixel_values, labels

    def A ( self : List[str] ) -> int:
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]:
        UpperCAmelCase_ : List[Any] = CvtModel(config=_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase_ : Tuple = model(_A )
        UpperCAmelCase_ : List[str] = (self.image_size, self.image_size)
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]:
        UpperCAmelCase_ : str = self.num_labels
        UpperCAmelCase_ : str = CvtForImageClassification(_A )
        model.to(_A )
        model.eval()
        UpperCAmelCase_ : int = model(_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : Dict ) -> Any:
        UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs
        UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    a_ = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False
    a_ = False
    a_ = False

    def A ( self : int ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = CvtModelTester(self )
        UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def A ( self : Any ) -> Dict:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A ( self : int ) -> List[str]:
        return

    @unittest.skip(reason='''Cvt does not output attentions''' )
    def A ( self : Optional[int] ) -> Optional[int]:
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def A ( self : Any ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def A ( self : List[Any] ) -> Any:
        pass

    def A ( self : int ) -> str:
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Tuple = model_class(_A )
            UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
            UpperCAmelCase_ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def A ( self : Tuple ) -> int:
        UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def A ( self : Dict ) -> List[str]:
        def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
            UpperCAmelCase_ : str = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
            UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
            UpperCAmelCase_ : Any = len(self.model_tester.depth )
            self.assertEqual(len(_A ) , _A )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCAmelCase_ : Dict = True
            check_hidden_states_output(_A , _A , _A )

    def A ( self : Union[str, Any] ) -> List[str]:
        UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def A ( self : List[Any] ) -> Optional[Any]:
        pass

    @slow
    def A ( self : Optional[int] ) -> int:
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
            self.assertIsNotNone(_A )


def __UpperCAmelCase ( ) -> str:
    UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
    @cached_property
    def A ( self : Union[str, Any] ) -> Union[str, Any]:
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def A ( self : str ) -> str:
        UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A )
        UpperCAmelCase_ : Optional[int] = self.default_image_processor
        UpperCAmelCase_ : List[str] = prepare_img()
        UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Any = model(**_A )
        # verify the logits
        UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
304
1
'''simple docstring'''

import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging


logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger('transformers.models.speecht5')


def __UpperCAmelCase ( A : List[str] , A : List[str] , A : Union[str, Any] ) -> Dict:
    hf_model.apply_weight_norm()
    UpperCAmelCase_ : Dict = checkpoint['''input_conv.weight_g''']
    UpperCAmelCase_ : Optional[Any] = checkpoint['''input_conv.weight_v''']
    UpperCAmelCase_ : Tuple = checkpoint['''input_conv.bias''']
    for i in range(len(config.upsample_rates ) ):
        UpperCAmelCase_ : Dict = checkpoint[F"upsamples.{i}.1.weight_g"]
        UpperCAmelCase_ : List[str] = checkpoint[F"upsamples.{i}.1.weight_v"]
        UpperCAmelCase_ : Dict = checkpoint[F"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            UpperCAmelCase_ : List[Any] = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_g"]
            UpperCAmelCase_ : Dict = checkpoint[F"blocks.{i}.convs1.{j}.1.weight_v"]
            UpperCAmelCase_ : List[str] = checkpoint[F"blocks.{i}.convs1.{j}.1.bias"]
            UpperCAmelCase_ : int = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_g"]
            UpperCAmelCase_ : Dict = checkpoint[F"blocks.{i}.convs2.{j}.1.weight_v"]
            UpperCAmelCase_ : Union[str, Any] = checkpoint[F"blocks.{i}.convs2.{j}.1.bias"]
    UpperCAmelCase_ : Tuple = checkpoint['''output_conv.1.weight_g''']
    UpperCAmelCase_ : Tuple = checkpoint['''output_conv.1.weight_v''']
    UpperCAmelCase_ : str = checkpoint['''output_conv.1.bias''']
    hf_model.remove_weight_norm()


@torch.no_grad()
def __UpperCAmelCase ( A : Tuple , A : Dict , A : List[str] , A : List[str]=None , A : Tuple=None , ) -> Dict:
    if config_path is not None:
        UpperCAmelCase_ : int = SpeechTaHifiGanConfig.from_pretrained(A )
    else:
        UpperCAmelCase_ : List[Any] = SpeechTaHifiGanConfig()
    UpperCAmelCase_ : int = SpeechTaHifiGan(A )
    UpperCAmelCase_ : Dict = torch.load(A )
    load_weights(orig_checkpoint['''model''']['''generator'''] , A , A )
    UpperCAmelCase_ : Optional[Any] = np.load(A )
    UpperCAmelCase_ : int = stats[0].reshape(-1 )
    UpperCAmelCase_ : Optional[int] = stats[1].reshape(-1 )
    UpperCAmelCase_ : Union[str, Any] = torch.from_numpy(A ).float()
    UpperCAmelCase_ : Optional[Any] = torch.from_numpy(A ).float()
    model.save_pretrained(A )
    if repo_id:
        print('''Pushing to the hub...''' )
        model.push_to_hub(A )


if __name__ == "__main__":
    _UpperCamelCase : Any = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )

    _UpperCamelCase : List[str] = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
304
'''simple docstring'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=UpperCamelCase)
class snake_case__ ( UpperCamelCase):
    a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
    a_ = Features({"text": Value("string")})
    a_ = Features({})
    a_ = "text"

    @property
    def A ( self : List[str] ) -> Dict[str, str]:
        return {self.text_column: "text"}
304
1
'''simple docstring'''

import csv

import tweepy

# Twitter API credentials
_UpperCamelCase : str = ''
_UpperCamelCase : Optional[int] = ''
_UpperCamelCase : Tuple = ''
_UpperCamelCase : Optional[Any] = ''


def __UpperCAmelCase ( A : str ) -> None:
    # authorize twitter, initialize tweepy
    UpperCAmelCase_ : str = tweepy.OAuthHandler(A , A )
    auth.set_access_token(A , A )
    UpperCAmelCase_ : Optional[Any] = tweepy.API(A )

    # initialize a list to hold all the tweepy Tweets
    UpperCAmelCase_ : Dict = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    UpperCAmelCase_ : Any = api.user_timeline(screen_name=A , count=2_0_0 )

    # save most recent tweets
    alltweets.extend(A )

    # save the id of the oldest tweet less one
    UpperCAmelCase_ : str = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(A ) > 0:
        print(F"getting tweets before {oldest}" )

        # all subsequent requests use the max_id param to prevent duplicates
        UpperCAmelCase_ : Tuple = api.user_timeline(
            screen_name=A , count=2_0_0 , max_id=A )

        # save most recent tweets
        alltweets.extend(A )

        # update the id of the oldest tweet less one
        UpperCAmelCase_ : str = alltweets[-1].id - 1
        print(F"...{len(A )} tweets downloaded so far" )

    # transform the tweepy tweets into a 2D array that will populate the csv
    UpperCAmelCase_ : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(F"new_{screen_name}_tweets.csv" , '''w''' ) as f:
        UpperCAmelCase_ : str = csv.writer(A )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(A )


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets('FirePing32')
304
'''simple docstring'''

import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
    with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
        UpperCAmelCase_ : Union[str, Any] = json.load(A )
    UpperCAmelCase_ : Optional[int] = {}
    UpperCAmelCase_ : List[str] = []
    UpperCAmelCase_ : str = []
    for key, info in class_info.items():
        UpperCAmelCase_ : Tuple = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(A ) )
    UpperCAmelCase_ : Any = thing_ids
    UpperCAmelCase_ : Union[str, Any] = class_names
    return metadata


class snake_case__ ( unittest.TestCase):
    def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
        UpperCAmelCase_ : List[str] = parent
        UpperCAmelCase_ : Optional[Any] = batch_size
        UpperCAmelCase_ : Optional[Any] = num_channels
        UpperCAmelCase_ : Tuple = min_resolution
        UpperCAmelCase_ : Optional[int] = max_resolution
        UpperCAmelCase_ : Dict = do_resize
        UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
        UpperCAmelCase_ : int = do_normalize
        UpperCAmelCase_ : List[Any] = image_mean
        UpperCAmelCase_ : Dict = image_std
        UpperCAmelCase_ : str = class_info_file
        UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
        UpperCAmelCase_ : Tuple = num_text
        UpperCAmelCase_ : Union[str, Any] = repo_path
        # for the post_process_functions
        UpperCAmelCase_ : Any = 2
        UpperCAmelCase_ : Dict = 10
        UpperCAmelCase_ : int = 10
        UpperCAmelCase_ : Optional[Any] = 3
        UpperCAmelCase_ : str = 4
        UpperCAmelCase_ : int = num_labels
        UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
        UpperCAmelCase_ : str = ignore_index

    def A ( self : Dict ) -> List[Any]:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
        if not batched:
            UpperCAmelCase_ : Any = image_inputs[0]
            if isinstance(_A , Image.Image ):
                UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
            else:
                UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
            if w < h:
                UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
                UpperCAmelCase_ : int = self.size['''shortest_edge''']
            elif w > h:
                UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
                UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
            else:
                UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
                UpperCAmelCase_ : str = self.size['''shortest_edge''']
        else:
            UpperCAmelCase_ : Dict = []
            for image in image_inputs:
                UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
            UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
        return expected_height, expected_width

    def A ( self : Tuple ) -> str:
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )


@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    a_ = image_processing_class

    def A ( self : Optional[int] ) -> Any:
        UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )

    @property
    def A ( self : Any ) -> int:
        return self.image_processing_tester.prepare_image_processor_dict()

    def A ( self : Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''image_mean''' ) )
        self.assertTrue(hasattr(_A , '''image_std''' ) )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''do_resize''' ) )
        self.assertTrue(hasattr(_A , '''size''' ) )
        self.assertTrue(hasattr(_A , '''ignore_index''' ) )
        self.assertTrue(hasattr(_A , '''class_info_file''' ) )
        self.assertTrue(hasattr(_A , '''num_text''' ) )
        self.assertTrue(hasattr(_A , '''repo_path''' ) )
        self.assertTrue(hasattr(_A , '''metadata''' ) )
        self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )

    def A ( self : Dict ) -> Dict:
        pass

    def A ( self : Tuple ) -> Dict:
        # Initialize image_processor
        UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )
        # Test not batched input
        UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : int = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : Tuple ) -> Tuple:
        # Initialize image_processor
        UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )
        # Test not batched input
        UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : Tuple = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : Dict ) -> Union[str, Any]:
        # Initialize image_processor
        UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )
        # Test not batched input
        UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : Optional[int] = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
        UpperCAmelCase_ : int = None
        UpperCAmelCase_ : Union[str, Any] = None
        UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
        if with_segmentation_maps:
            UpperCAmelCase_ : Any = num_labels
            if is_instance_map:
                UpperCAmelCase_ : Any = list(range(_A ) ) * 2
                UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
            UpperCAmelCase_ : Dict = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]
        UpperCAmelCase_ : Tuple = image_processor(
            _A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , )
        return inputs

    def A ( self : int ) -> str:
        pass

    def A ( self : Tuple ) -> Union[str, Any]:
        def common(_A : Optional[int]=False , _A : str=None ):
            UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
                with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )
            UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
            UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
            UpperCAmelCase_ : int = inputs['''pixel_values''']
            UpperCAmelCase_ : Tuple = inputs['''text_inputs''']
            # check the batch_size
            for mask_label, class_label, text_input in zip(_A , _A , _A ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(_A ) , self.image_processing_tester.num_text )

        common()
        common(is_instance_map=_A )
        common(is_instance_map=_A , segmentation_type='''pil''' )
        common(is_instance_map=_A , segmentation_type='''pil''' )

    def A ( self : List[Any] ) -> List[Any]:
        UpperCAmelCase_ : int = np.zeros((20, 50) )
        UpperCAmelCase_ : List[str] = 1
        UpperCAmelCase_ : Dict = 1
        UpperCAmelCase_ : List[Any] = 1
        UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
        self.assertEqual(len(_A ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )

    def A ( self : Any ) -> List[Any]:
        UpperCAmelCase_ : int = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(_A )
        self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = image_processor.post_process_semantic_segmentation(_A , target_sizes=_A )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )

    def A ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase_ : Any = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )
        self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , _A )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )

    def A ( self : Optional[int] ) -> Union[str, Any]:
        UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , )
        UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )
        self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , _A )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
304
1
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


_UpperCamelCase : int = logging.get_logger(__name__)


class snake_case__ ( UpperCamelCase):
    a_ = ["pixel_values"]

    def __init__( self : Optional[int] , _A : bool = True , _A : int = 32 , _A : str=PILImageResampling.BILINEAR , _A : bool = True , **_A : List[str] , ) -> None:
        UpperCAmelCase_ : Dict = do_resize
        UpperCAmelCase_ : Optional[Any] = do_rescale
        UpperCAmelCase_ : Optional[Any] = size_divisor
        UpperCAmelCase_ : Dict = resample
        super().__init__(**_A )

    def A ( self : Dict , _A : np.ndarray , _A : int , _A : Optional[int] , _A : Optional[ChannelDimension] = None , **_A : Tuple ) -> np.ndarray:
        UpperCAmelCase_ , UpperCAmelCase_ : str = get_image_size(_A )
        # Rounds the height and width down to the closest multiple of size_divisor
        UpperCAmelCase_ : Optional[Any] = height // size_divisor * size_divisor
        UpperCAmelCase_ : int = width // size_divisor * size_divisor
        UpperCAmelCase_ : Tuple = resize(_A , (new_h, new_w) , resample=_A , data_format=_A , **_A )
        return image

    def A ( self : Any , _A : np.ndarray , _A : float , _A : Optional[ChannelDimension] = None , **_A : Optional[int] ) -> np.ndarray:
        return rescale(image=_A , scale=_A , data_format=_A , **_A )

    def A ( self : int , _A : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , _A : Optional[bool] = None , _A : Optional[int] = None , _A : List[str]=None , _A : Optional[bool] = None , _A : Optional[Union[TensorType, str]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Tuple , ) -> BatchFeature:
        UpperCAmelCase_ : str = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ : str = size_divisor if size_divisor is not None else self.size_divisor
        UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        UpperCAmelCase_ : Tuple = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase_ : Optional[Any] = [to_numpy_array(_A ) for img in images]
        if do_resize:
            UpperCAmelCase_ : Dict = [self.resize(_A , size_divisor=_A , resample=_A ) for image in images]
        if do_rescale:
            UpperCAmelCase_ : Any = [self.rescale(_A , scale=1 / 2_55 ) for image in images]
        UpperCAmelCase_ : Any = [to_channel_dimension_format(_A , _A ) for image in images]
        UpperCAmelCase_ : Optional[Any] = {'''pixel_values''': images}
        return BatchFeature(data=_A , tensor_type=_A )
304
'''simple docstring'''

import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
    ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
    ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
    ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
    ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
    ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
    ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
    ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
    ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
    ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
    (
        'zero-shot-object-detection',
        'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
        'AutoModelForZeroShotObjectDetection',
    ),
    ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
    ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
    ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
    ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
    (
        'table-question-answering',
        'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForTableQuestionAnswering',
    ),
    ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
    ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
    (
        'next-sentence-prediction',
        'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
        'AutoModelForNextSentencePrediction',
    ),
    (
        'audio-frame-classification',
        'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForAudioFrameClassification',
    ),
    ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
    (
        'document-question-answering',
        'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForDocumentQuestionAnswering',
    ),
    (
        'visual-question-answering',
        'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForVisualQuestionAnswering',
    ),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
    (
        'zero-shot-image-classification',
        'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForZeroShotImageClassification',
    ),
    ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
    ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
    ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]


def __UpperCAmelCase ( A : Optional[int] ) -> int:
    UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
    return [m.group(0 ) for m in matches]


def __UpperCAmelCase ( ) -> str:
    UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    UpperCAmelCase_ : Optional[Any] = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    UpperCAmelCase_ : Dict = collections.defaultdict(A )
    UpperCAmelCase_ : str = collections.defaultdict(A )
    UpperCAmelCase_ : int = collections.defaultdict(A )

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(A ):
        UpperCAmelCase_ : int = None
        if _re_tf_models.match(A ) is not None:
            UpperCAmelCase_ : Optional[Any] = tf_models
            UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
        elif _re_flax_models.match(A ) is not None:
            UpperCAmelCase_ : int = flax_models
            UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
        elif _re_pt_models.match(A ) is not None:
            UpperCAmelCase_ : Union[str, Any] = pt_models
            UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]

        if lookup_dict is not None:
            while len(A ) > 0:
                if attr_name in model_prefix_to_model_type:
                    UpperCAmelCase_ : Optional[int] = True
                    break
                # Try again after removing the last word in the name
                UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )

    UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    UpperCAmelCase_ : List[Any] = list(A )
    all_models.sort()

    UpperCAmelCase_ : Dict = {'''model_type''': all_models}
    UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
    UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
    UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    UpperCAmelCase_ : int = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            UpperCAmelCase_ : Any = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            UpperCAmelCase_ : Dict = '''AutoTokenizer'''

    UpperCAmelCase_ : str = [processors[t] for t in all_models]

    return pd.DataFrame(A )


def __UpperCAmelCase ( A : Optional[int] ) -> str:
    UpperCAmelCase_ : int = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(A , A , A ):
            # The type of pipeline may not exist in this framework
            if not hasattr(A , A ):
                continue
            # First extract all model_names
            UpperCAmelCase_ : List[str] = []
            for name in getattr(A , A ).values():
                if isinstance(A , A ):
                    model_names.append(A )
                else:
                    model_names.extend(list(A ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table


def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
    UpperCAmelCase_ : Tuple = get_frameworks_table()
    UpperCAmelCase_ : Any = Dataset.from_pandas(A )
    UpperCAmelCase_ : str = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
    UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
    UpperCAmelCase_ : Optional[int] = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(A ) )
    }
    UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
    UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        }
    )
    UpperCAmelCase_ : Dict = Dataset.from_pandas(A )

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )

        if commit_sha is not None:
            UpperCAmelCase_ : List[str] = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            UpperCAmelCase_ : int = '''Update'''

        upload_folder(
            repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , )


def __UpperCAmelCase ( ) -> int:
    UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
    UpperCAmelCase_ : List[str] = []
    for key in pipeline_tasks:
        if key not in in_table:
            UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
            if isinstance(A , (list, tuple) ):
                UpperCAmelCase_ : Dict = model[0]
            UpperCAmelCase_ : Any = model.__name__
            if model not in in_table.values():
                missing.append(A )

    if len(A ) > 0:
        UpperCAmelCase_ : List[Any] = ''', '''.join(A )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F"`utils/update_metadata.py`: {msg}. Please add them!" )


if __name__ == "__main__":
    _UpperCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    _UpperCamelCase : Tuple = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
304
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_UpperCamelCase : Optional[int] = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Optional[Any] = ['VisionEncoderDecoderModel']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Optional[Any] = ['TFVisionEncoderDecoderModel']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : str = ['FlaxVisionEncoderDecoderModel']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    _UpperCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
'''simple docstring'''

import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)


_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class snake_case__ :
    a_ = field(
        default=UpperCamelCase , metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )


@dataclass
class snake_case__ :
    a_ = field(
        default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
    a_ = field(
        default=UpperCamelCase , metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
    a_ = field(default=UpperCamelCase , metadata={"help": "Whether or not to use whole word mask."})
    a_ = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    a_ = field(
        default=1 / 6 , metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } , )
    a_ = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
    a_ = field(
        default=-1 , metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in block of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } , )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})


def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
    def _dataset(A : Dict , A : str=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , )
            return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )


def __UpperCAmelCase ( ) -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , A )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )

    if model_args.tokenizer_name:
        UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )

    if model_args.model_name_or_path:
        UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )

    model.resize_token_embeddings(len(A ) )

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            ''' --mlm flag (masked language modeling).''' )

    if data_args.block_size <= 0:
        UpperCAmelCase_ : List[str] = tokenizer.max_len  # Our input block size will be the max possible for the model
    else:
        UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )

    # Get datasets
    UpperCAmelCase_ : str = (
        get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    UpperCAmelCase_ : Any = (
        get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
            tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
                tokenizer=A , mlm_probability=data_args.mlm_probability )
        else:
            UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
                tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    UpperCAmelCase_ : Any = Trainer(
        model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , )

    # Training
    if training_args.do_train:
        UpperCAmelCase_ : List[str] = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=A )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    UpperCAmelCase_ : Tuple = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        UpperCAmelCase_ : Dict = trainer.evaluate()

        UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
        UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}

        UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(A , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , A , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )

        results.update(A )

    return results


def __UpperCAmelCase ( A : Tuple ) -> Tuple:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
304
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_UpperCamelCase : Tuple = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : str = ['YolosFeatureExtractor']
    _UpperCamelCase : Any = ['YolosImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Union[str, Any] = [
        'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'YolosForObjectDetection',
        'YolosModel',
        'YolosPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
'''simple docstring'''

import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

_UpperCamelCase : Optional[int] = '0.12'  # assumed parallelism: 8


@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
    @classmethod
    def A ( cls : Optional[int] ) -> Tuple:
        UpperCAmelCase_ : List[str] = TOKEN
        HfFolder.save_token(_A )

    @classmethod
    def A ( cls : int ) -> Tuple:
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass

    def A ( self : Dict ) -> Optional[int]:
        UpperCAmelCase_ : List[Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )

        UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )

        UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )

        UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )

        UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

    def A ( self : str ) -> Tuple:
        UpperCAmelCase_ : List[str] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )

        UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )

        UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )


def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
    UpperCAmelCase_ : Optional[int] = True
    UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
    UpperCAmelCase_ : str = flatten_dict(modela.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
            UpperCAmelCase_ : int = False
    return models_are_equal


@require_flax
class snake_case__ ( unittest.TestCase):
    def A ( self : Any ) -> Any:
        UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase_ : Any = FlaxBertModel(_A )
        UpperCAmelCase_ : Tuple = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_A , _A ) )
            with self.assertRaises(_A ):
                UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
            UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertTrue(check_models_equal(_A , _A ) )

    def A ( self : int ) -> Tuple:
        UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
        UpperCAmelCase_ : str = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
            with self.assertRaises(_A ):
                UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
            UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertTrue(check_models_equal(_A , _A ) )

    def A ( self : int ) -> Optional[int]:
        UpperCAmelCase_ : int = '''bert'''
        UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_A ):
            UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
        UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertIsNotNone(_A )

    def A ( self : Any ) -> str:
        UpperCAmelCase_ : Optional[Any] = '''bert'''
        UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_A ):
            UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
        UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertIsNotNone(_A )
304
1
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class snake_case__ ( enum.Enum): a_ = 0 a_ = 1 a_ = 2 @add_end_docstrings(UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]: super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. UpperCAmelCase_ : Dict = None if self.model.config.prefix is not None: UpperCAmelCase_ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params ) UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params} UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params} def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = {} if prefix is not None: UpperCAmelCase_ : List[Any] = prefix if prefix: UpperCAmelCase_ : Tuple = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) UpperCAmelCase_ : Union[str, Any] = handle_long_generation preprocess_params.update(_A ) UpperCAmelCase_ : Optional[int] = generate_kwargs UpperCAmelCase_ : Tuple = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : List[Any] = ReturnType.TENSORS if return_type is not None: UpperCAmelCase_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase_ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict: return super().__call__(_A , **_A ) def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : str = prompt_text if handle_long_generation == "hole": UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens'''] else: UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:] return inputs def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]: UpperCAmelCase_ : Any = model_inputs['''input_ids'''] UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = 1 else: UpperCAmelCase_ : Optional[int] = input_ids.shape[0] UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) UpperCAmelCase_ : Any = generated_sequence.shape[0] if self.framework == "pt": UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0] UpperCAmelCase_ : int = model_outputs['''input_ids'''] UpperCAmelCase_ : str = model_outputs['''prompt_text'''] UpperCAmelCase_ : Any = generated_sequence.numpy().tolist() UpperCAmelCase_ : int = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text UpperCAmelCase_ : Any = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCAmelCase_ : List[str] = 0 else: UpperCAmelCase_ : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:] else: UpperCAmelCase_ : Dict = text[prompt_length:] UpperCAmelCase_ : List[str] = {'''generated_text''': all_text} records.append(_A ) return records
304
'''simple docstring'''

INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
304
1
'''simple docstring'''

import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    Compute the Levenshtein edit distance between two words.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("", "")
    0
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
304
'''simple docstring''' import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]: if isinstance(A , A ): UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A ) else: UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A ) for i, tensor in enumerate(A ): if padding_side == "right": if isinstance(A , A ): UpperCAmelCase_ : Tuple = tensor[:sequence_length] else: UpperCAmelCase_ : Dict = tensor[:sequence_length] else: if isinstance(A , A ): UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length] else: UpperCAmelCase_ : int = tensor[:sequence_length] return out_tensor.tolist() def __UpperCAmelCase ( A : List[Any] ) -> str: UpperCAmelCase_ : Dict = ord(A ) if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6): return True UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A ) if cat.startswith('''P''' ): return True return False @dataclass class snake_case__ ( UpperCamelCase): a_ = 42 a_ = True a_ = None a_ = None a_ = -100 a_ = "pt" def A ( self : List[Any] , _A : Dict ) -> Tuple: import torch UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels''' UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None UpperCAmelCase_ : Tuple = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , ) if labels is None: return batch UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1] UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side if padding_side == "right": UpperCAmelCase_ : Optional[Any] = [ list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels ] else: UpperCAmelCase_ : Any = [ [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels ] UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features] UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A ) UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features] UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A ) UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()} return batch
304
1
'''simple docstring'''

import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt ``text`` into cipher values plus the random key that decodes them."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k  # invertible given k: i = (c - k**2) / k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Recover the plaintext from the cipher values and the matching key."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - key[i] ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
304
'''simple docstring'''

import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    Compute the Levenshtein edit distance between two words.

    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("", "")
    0
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
304
1
'''simple docstring''' from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase : List[str] = logging.get_logger(__name__) # TODO Update this _UpperCamelCase : Optional[int] = { 'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json', # See all ESM models at https://huggingface.co/models?filter=esm } class snake_case__ ( UpperCamelCase): a_ = "esm" def __init__( self : str , _A : str=None , _A : List[str]=None , _A : List[str]=None , _A : int=7_68 , _A : Tuple=12 , _A : Union[str, Any]=12 , _A : Optional[int]=30_72 , _A : str=0.1 , _A : List[Any]=0.1 , _A : Any=10_26 , _A : Dict=0.02 , _A : Optional[int]=1e-12 , _A : Union[str, Any]="absolute" , _A : Tuple=True , _A : Tuple=None , _A : str=False , _A : Optional[int]=False , _A : Optional[int]=None , _A : Any=None , **_A : Any , ) -> Optional[Any]: super().__init__(pad_token_id=_A , mask_token_id=_A , **_A ) UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : Union[str, Any] = intermediate_size UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : str = max_position_embeddings UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : Union[str, Any] = position_embedding_type UpperCAmelCase_ : Tuple = use_cache UpperCAmelCase_ : Union[str, Any] = emb_layer_norm_before UpperCAmelCase_ : int = token_dropout UpperCAmelCase_ : Dict = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) UpperCAmelCase_ : List[str] = EsmFoldConfig() elif isinstance(_A , _A ): UpperCAmelCase_ : str = EsmFoldConfig(**_A ) UpperCAmelCase_ : Optional[Any] = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) UpperCAmelCase_ : Optional[Any] = get_default_vocab_list() else: UpperCAmelCase_ : str = vocab_list else: UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Optional[int] = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , _A ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Dict = super().to_dict() if isinstance(self.esmfold_config , _A ): UpperCAmelCase_ : Optional[Any] = self.esmfold_config.to_dict() return output @dataclass class snake_case__ : a_ = None a_ = True a_ = False a_ = False a_ = False a_ = 0 a_ = True a_ = False a_ = 128 a_ = None def A ( self : str ) -> Optional[Any]: if self.trunk is None: UpperCAmelCase_ : Any = TrunkConfig() elif isinstance(self.trunk , _A ): UpperCAmelCase_ : List[Any] = TrunkConfig(**self.trunk ) def A ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : int = asdict(self ) UpperCAmelCase_ : Any = self.trunk.to_dict() return output @dataclass class snake_case__ : a_ = 48 a_ = 1024 a_ = 128 a_ = 32 a_ = 32 a_ = 32 a_ = 0 a_ = 0 a_ = False a_ = 4 a_ = 128 a_ = None def A ( self : List[Any] ) -> Union[str, Any]: if self.structure_module is None: UpperCAmelCase_ : Union[str, Any] = StructureModuleConfig() elif isinstance(self.structure_module , _A ): UpperCAmelCase_ : 
Dict = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." ) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' F" {self.sequence_state_dim} and {self.sequence_state_dim}." ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." ) UpperCAmelCase_ : int = self.sequence_state_dim // self.sequence_head_width UpperCAmelCase_ : int = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." ) if self.pairwise_state_dim % 2 != 0: raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." ) if self.dropout >= 0.4: raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." ) def A ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ : Any = asdict(self ) UpperCAmelCase_ : Any = self.structure_module.to_dict() return output @dataclass class snake_case__ : a_ = 384 a_ = 128 a_ = 16 a_ = 128 a_ = 12 a_ = 4 a_ = 8 a_ = 0.1 a_ = 8 a_ = 1 a_ = 2 a_ = 7 a_ = 10 a_ = 1E-8 a_ = 1E5 def A ( self : Dict ) -> List[str]: return asdict(self ) def __UpperCAmelCase ( ) -> Optional[int]: return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
304
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """
    Project Euler 57: count how many of the first ``n`` continued-fraction
    expansions of sqrt(2) have a numerator with more digits than the denominator.

    >>> solution(14)
    2
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
304
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ : def __init__( self : List[str] , _A : Tuple , _A : str=3 , _A : str=32 , _A : Union[str, Any]=3 , _A : List[Any]=10 , _A : Union[str, Any]=[10, 20, 30, 40] , _A : Any=[1, 1, 2, 1] , _A : Any=True , _A : Optional[Any]=True , _A : Optional[int]="relu" , _A : Dict=3 , _A : Optional[int]=None , ) -> Optional[int]: UpperCAmelCase_ : Optional[Any] = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : int = image_size UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : List[str] = depths UpperCAmelCase_ : int = is_training UpperCAmelCase_ : str = use_labels UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : Any = scope UpperCAmelCase_ : List[str] = len(_A ) def A ( self : str ) -> int: UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : List[str] = self.get_config() return config, pixel_values, labels def A ( self : int ) -> Optional[Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def A ( self : Union[str, Any] , _A : Tuple , _A : int , _A : Any ) -> Any: UpperCAmelCase_ : Tuple = TFRegNetModel(config=_A ) UpperCAmelCase_ : int = model(_A , training=_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A ( self : int , _A : List[Any] , _A : Any , _A : Optional[int] ) -> str: UpperCAmelCase_ : List[str] = self.num_labels UpperCAmelCase_ : Any = TFRegNetForImageClassification(_A ) UpperCAmelCase_ : List[str] = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Optional[int] ) -> Any: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = config_and_inputs UpperCAmelCase_ : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () a_ = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) a_ = False a_ = False a_ = False a_ = False a_ = False def A ( self : Optional[int] ) -> 
int: UpperCAmelCase_ : Any = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A ) def A ( self : Dict ) -> List[str]: return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def A ( self : Any ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def A ( self : Dict ) -> List[Any]: super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def A ( self : Optional[int] ) -> Dict: pass def A ( self : str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = model_class(_A ) UpperCAmelCase_ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Any = [*signature.parameters.keys()] UpperCAmelCase_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def A ( self : Any ) -> List[Any]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def A ( self : Tuple ) -> Any: def check_hidden_states_output(_A : Dict , _A : Any , _A : Dict ): UpperCAmelCase_ : List[Any] = model_class(_A ) UpperCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(_A , _A ) , training=_A ) UpperCAmelCase_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : Tuple = layer_type UpperCAmelCase_ : Any = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : List[str] = True check_hidden_states_output(_A , _A , _A ) def A ( self : Optional[int] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(_A : List[str] , _A : Union[str, Any] , _A : Optional[Any] , _A : str={} ): UpperCAmelCase_ : List[str] = model(_A , return_dict=_A , **_A ) UpperCAmelCase_ : Any = model(_A , return_dict=_A , **_A ).to_tuple() def recursive_check(_A : Dict , _A : Optional[int] ): if isinstance(_A , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(_A , _A ): recursive_check(_A , _A ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(_A , _A ) ) , msg=( '''Tuple and dict output are not equal. 
Difference:''' F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}" ) , ) recursive_check(_A , _A ) for model_class in self.all_model_classes: UpperCAmelCase_ : str = model_class(_A ) UpperCAmelCase_ : int = self._prepare_for_class(_A , _A ) UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(_A , _A ) check_equivalence(_A , _A , _A ) UpperCAmelCase_ : str = self._prepare_for_class(_A , _A , return_labels=_A ) UpperCAmelCase_ : List[str] = self._prepare_for_class(_A , _A , return_labels=_A ) check_equivalence(_A , _A , _A ) UpperCAmelCase_ : Optional[int] = self._prepare_for_class(_A , _A ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(_A , _A ) check_equivalence(_A , _A , _A , {'''output_hidden_states''': True} ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(_A , _A , return_labels=_A ) UpperCAmelCase_ : Dict = self._prepare_for_class(_A , _A , return_labels=_A ) check_equivalence(_A , _A , _A , {'''output_hidden_states''': True} ) def A ( self : Optional[Any] ) -> str: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def A ( self : Tuple ) -> Optional[Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : str = TFRegNetModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def __UpperCAmelCase ( ) -> int: UpperCAmelCase_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class snake_case__ ( unittest.TestCase): @cached_property def A ( self : Optional[int] ) -> int: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A ( self : str ) -> int: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Dict = self.default_image_processor UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''tf''' ) # forward pass UpperCAmelCase_ : int = model(**_A , training=_A ) # verify the logits UpperCAmelCase_ : str = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase_ : List[str] = tf.constant([-0.4_180, -1.5_051, -3.4_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1e-4 )
304
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : Optional[Any] = do_center_crop UpperCAmelCase_ : Optional[int] = crop_size UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Optional[Any] = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : List[Any] = do_reduce_labels def A ( self : Union[str, Any] ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCAmelCase ( ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] ) UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCAmelCase ( ) -> Any: UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] ) UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] ) UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] ) UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = BeitImageProcessor if is_vision_available() else None def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> Optional[int]: 
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _A ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _A ) def A ( self : Optional[Any] ) -> Any: pass def A ( self : List[str] ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) UpperCAmelCase_ : Union[str, Any] = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs() UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def A ( self : List[Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
304
1
'''simple docstring'''

import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if the regexes in ``qs`` match any window of strings in ``ks``."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
304
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class snake_case__ ( enum.Enum): a_ = 0 a_ = 1 a_ = 2 @add_end_docstrings(UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]: super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. UpperCAmelCase_ : Dict = None if self.model.config.prefix is not None: UpperCAmelCase_ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params ) UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params} UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params} def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = {} if prefix is not None: UpperCAmelCase_ : List[Any] = prefix if prefix: UpperCAmelCase_ : Tuple = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) UpperCAmelCase_ : Union[str, Any] = handle_long_generation preprocess_params.update(_A ) UpperCAmelCase_ : Optional[int] = generate_kwargs UpperCAmelCase_ : Tuple = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : List[Any] = ReturnType.TENSORS if return_type is not None: UpperCAmelCase_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase_ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict: return super().__call__(_A , **_A ) def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : str = prompt_text if handle_long_generation == "hole": UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens'''] else: UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:] return inputs def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]: UpperCAmelCase_ : Any = model_inputs['''input_ids'''] UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = 1 else: UpperCAmelCase_ : Optional[int] = input_ids.shape[0] UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) UpperCAmelCase_ : Any = generated_sequence.shape[0] if self.framework == "pt": UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0] UpperCAmelCase_ : int = model_outputs['''input_ids'''] UpperCAmelCase_ : str = model_outputs['''prompt_text'''] UpperCAmelCase_ : Any = generated_sequence.numpy().tolist() UpperCAmelCase_ : int = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text UpperCAmelCase_ : Any = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCAmelCase_ : List[str] = 0 else: UpperCAmelCase_ : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:] else: UpperCAmelCase_ : Dict = text[prompt_length:] UpperCAmelCase_ : List[str] = {'''generated_text''': all_text} records.append(_A ) return records
304
1
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
304
'''simple docstring'''

from __future__ import annotations

import math


def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int:
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if not scores:
        raise ValueError('''Scores cannot be empty''' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1 , node_index * 2 , A , A , A ) ,
            minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
        if is_max
        else min(
            minimax(depth + 1 , node_index * 2 , A , A , A ) ,
            minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , )
    )


def __UpperCAmelCase ( ) -> None:
    UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
    UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 )
    print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
304
1
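
A worked trace of the demo in the minimax snippet above; the readable names are hypothetical stand-ins for the obfuscated ones:

scores = [90, 23, 6, 33, 21, 65, 123, 34423]  # 8 leaves -> height = log2(8) = 3
# depth 2 (maximiser): [max(90, 23), max(6, 33), max(21, 65), max(123, 34423)] = [90, 33, 65, 34423]
# depth 1 (minimiser): [min(90, 33), min(65, 34423)] = [33, 65]
# depth 0 (maximiser): max(33, 65) = 65  -> prints "Optimal value : 65"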
'''simple docstring'''

def __UpperCAmelCase ( A : list[int] , A : int ) -> bool:
    UpperCAmelCase_ : Optional[int] = len(A )
    UpperCAmelCase_ : Tuple = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        UpperCAmelCase_ : Optional[Any] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        UpperCAmelCase_ : List[str] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                UpperCAmelCase_ : str = subset[i - 1][j]
            if arr[i - 1] <= j:
                UpperCAmelCase_ : Optional[int] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
304
'''simple docstring'''

from __future__ import annotations


def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list:
    UpperCAmelCase_ : Any = []
    UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    UpperCAmelCase_ : List[Any] = result + left + right
    return input_list


def __UpperCAmelCase ( A : list ) -> list:
    if len(A ) <= 1:
        return input_list
    UpperCAmelCase_ : List[str] = list(A )
    # iteration for two-way merging
    UpperCAmelCase_ : Tuple = 2
    while p <= len(A ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 , len(A ) , A ):
            UpperCAmelCase_ : Union[str, Any] = i
            UpperCAmelCase_ : int = i + p - 1
            UpperCAmelCase_ : Any = (low + high + 1) // 2
            UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A )
        # final merge of last two parts
        if p * 2 >= len(A ):
            UpperCAmelCase_ : str = i
            UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 )
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    _UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        _UpperCamelCase : List[str] = []
    else:
        _UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
304
1
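
A usage sketch for the subset-sum routine above; is_sum_subset is a hypothetical readable name for the obfuscated definition:

# is_sum_subset stands in for the DP function in the subset-sum snippet above.
arr = [3, 34, 4, 12, 5, 2]
print(is_sum_subset(arr, 9))   # True, since 4 + 5 = 9
print(is_sum_subset(arr, 30))  # False, no subset of arr sums to 30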
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_UpperCamelCase : Tuple = {
    'configuration_blenderbot_small': [
        'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotSmallConfig',
        'BlenderbotSmallOnnxConfig',
    ],
    'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : str = ['BlenderbotSmallTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Optional[int] = [
        'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotSmallForCausalLM',
        'BlenderbotSmallForConditionalGeneration',
        'BlenderbotSmallModel',
        'BlenderbotSmallPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : Dict = [
        'TFBlenderbotSmallForConditionalGeneration',
        'TFBlenderbotSmallModel',
        'TFBlenderbotSmallPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : str = [
        'FlaxBlenderbotSmallForConditionalGeneration',
        'FlaxBlenderbotSmallModel',
        'FlaxBlenderbotSmallPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    _UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
'''simple docstring'''

from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class snake_case__ :
    a_ = 42  # [batch_size x 3]
    a_ = 42  # [batch_size x 3]
    a_ = 42  # [batch_size x 3]
    a_ = 42  # [batch_size x 3]
    a_ = 42
    a_ = 42
    a_ = 42
    a_ = 42
    a_ = 42

    def A ( self : Tuple ) -> Optional[int]:
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2

    def A ( self : List[Any] ) -> Union[str, Any]:
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )

    def A ( self : Any ) -> Optional[Any]:
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )

    def A ( self : Optional[int] ) -> torch.Tensor:
        UpperCAmelCase_ : Dict = torch.arange(self.height * self.width )
        UpperCAmelCase_ : int = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(_A , self.width , rounding_mode='''trunc''' ),
            ] , axis=1 , )
        return coords

    @property
    def A ( self : Optional[Any] ) -> Optional[Any]:
        UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape
        UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) )
        UpperCAmelCase_ : Any = self.get_image_coords()
        UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A )
        UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays

    def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor:
        UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 )
        UpperCAmelCase_ : Union[str, Any] = self.resolution()
        UpperCAmelCase_ : int = self.fov()
        UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1
        UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 )
        UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 )
        UpperCAmelCase_ : List[Any] = (
            self.z.view(_A , 1 , 3 )
            + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:]
        )
        UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A )
        UpperCAmelCase_ : Union[str, Any] = torch.stack(
            [
                torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(_A , *_A , 2 , 3 )

    def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , )


def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera:
    UpperCAmelCase_ : List[str] = []
    UpperCAmelCase_ : Optional[int] = []
    UpperCAmelCase_ : Optional[Any] = []
    UpperCAmelCase_ : str = []
    for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
        UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        UpperCAmelCase_ : Optional[int] = -z * 4
        UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] )
        UpperCAmelCase_ : List[Any] = np.cross(A , A )
        origins.append(A )
        xs.append(A )
        ys.append(A )
        zs.append(A )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
304
1
'''simple docstring'''

from copy import deepcopy


class snake_case__ :
    def __init__( self : Dict , _A : list[int] | None = None , _A : int | None = None ) -> None:
        if arr is None and size is not None:
            UpperCAmelCase_ : List[str] = size
            UpperCAmelCase_ : Any = [0] * size
        elif arr is not None:
            self.init(_A )
        else:
            raise ValueError('''Either arr or size must be specified''' )

    def A ( self : Optional[Any] , _A : list[int] ) -> None:
        UpperCAmelCase_ : Tuple = len(_A )
        UpperCAmelCase_ : Union[str, Any] = deepcopy(_A )
        for i in range(1 , self.size ):
            UpperCAmelCase_ : Union[str, Any] = self.next_(_A )
            if j < self.size:
                self.tree[j] += self.tree[i]

    def A ( self : Optional[int] ) -> list[int]:
        UpperCAmelCase_ : Any = self.tree[:]
        for i in range(self.size - 1 , 0 , -1 ):
            UpperCAmelCase_ : int = self.next_(_A )
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def A ( _A : int ) -> int:
        return index + (index & (-index))

    @staticmethod
    def A ( _A : int ) -> int:
        return index - (index & (-index))

    def A ( self : List[str] , _A : int , _A : int ) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            UpperCAmelCase_ : List[Any] = self.next_(_A )

    def A ( self : Dict , _A : int , _A : int ) -> None:
        self.add(_A , value - self.get(_A ) )

    def A ( self : List[Any] , _A : int ) -> int:
        if right == 0:
            return 0
        UpperCAmelCase_ : Union[str, Any] = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            UpperCAmelCase_ : Dict = self.prev(_A )
        return result

    def A ( self : List[Any] , _A : int , _A : int ) -> int:
        return self.prefix(_A ) - self.prefix(_A )

    def A ( self : List[Any] , _A : int ) -> int:
        return self.query(_A , index + 1 )

    def A ( self : Tuple , _A : int ) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        UpperCAmelCase_ : Union[str, Any] = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        UpperCAmelCase_ : int = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
304
'''simple docstring'''

import random


class snake_case__ :
    @staticmethod
    def A ( _A : str ) -> tuple[list[int], list[int]]:
        UpperCAmelCase_ : Dict = [ord(_A ) for i in text]
        UpperCAmelCase_ : List[str] = []
        UpperCAmelCase_ : Any = []
        for i in plain:
            UpperCAmelCase_ : int = random.randint(1 , 3_00 )
            UpperCAmelCase_ : str = (i + k) * k
            cipher.append(_A )
            key.append(_A )
        return cipher, key

    @staticmethod
    def A ( _A : list[int] , _A : list[int] ) -> str:
        UpperCAmelCase_ : Dict = []
        for i in range(len(_A ) ):
            UpperCAmelCase_ : int = int((cipher[i] - (key[i]) ** 2) / key[i] )
            plain.append(chr(_A ) )
        return "".join(_A )


if __name__ == "__main__":
    _UpperCamelCase , _UpperCamelCase : Any = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
304
1
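
A usage sketch for the Fenwick tree class above. FenwickTree is a hypothetical stand-in for snake_case__, and the method names (add/prefix/query) are inferred from the internal call sites (self.add, self.prefix, self.query) visible in the snippet:

f = FenwickTree(arr=[1, 2, 3, 4, 5])  # hypothetical readable class name
print(f.prefix(3))    # sum of the first three values: 1 + 2 + 3 = 6
f.add(0, 10)          # arr[0] becomes 11
print(f.query(0, 2))  # half-open range sum: 11 + 2 = 13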
'''simple docstring'''

from abc import ABC, abstractmethod
from argparse import ArgumentParser


class snake_case__ ( UpperCamelCase):
    @staticmethod
    @abstractmethod
    def A ( _A : ArgumentParser ) -> Optional[Any]:
        raise NotImplementedError()

    @abstractmethod
    def A ( self : List[Any] ) -> Optional[int]:
        raise NotImplementedError()
304
'''simple docstring'''

import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


_UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    a_ = ReformerTokenizer
    a_ = ReformerTokenizerFast
    a_ = True
    a_ = False
    a_ = True

    def A ( self : Optional[Any] ) -> List[Any]:
        super().setUp()
        UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A )
        tokenizer.save_pretrained(self.tmpdirname )

    def A ( self : Optional[Any] ) -> Any:
        UpperCAmelCase_ : List[Any] = '''<s>'''
        UpperCAmelCase_ : int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )

    def A ( self : Any ) -> str:
        UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<unk>''' )
        self.assertEqual(vocab_keys[1] , '''<s>''' )
        self.assertEqual(vocab_keys[-1] , '''j''' )
        self.assertEqual(len(_A ) , 10_00 )

    def A ( self : Optional[int] ) -> int:
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )

    def A ( self : Optional[Any] ) -> List[Any]:
        if not self.test_rust_tokenizer:
            return
        UpperCAmelCase_ : int = self.get_tokenizer()
        UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
        UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.'''
        UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A )
        UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
        self.assertListEqual(_A , _A )
        UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
        UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A )
        self.assertListEqual(_A , _A )
        UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
        UpperCAmelCase_ : Dict = tokenizer.encode(_A )
        UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A )
        self.assertListEqual(_A , _A )

    def A ( self : Tuple , _A : Dict=15 ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                # Simple input
                UpperCAmelCase_ : Optional[int] = '''This is a simple input'''
                UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
                UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
                UpperCAmelCase_ : Dict = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
                # Simple input
                self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
                # Pair input
                self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' )
                # Pair input
                self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )

    def A ( self : Union[str, Any] ) -> int:
        pass

    def A ( self : int ) -> Any:
        UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A )
        UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''9''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''é''',
                '''.''',
            ] , )
        UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A )
        self.assertListEqual(
            _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A )
        self.assertListEqual(
            _A , [
                SPIECE_UNDERLINE + '''I''',
                SPIECE_UNDERLINE + '''was''',
                SPIECE_UNDERLINE + '''b''',
                '''or''',
                '''n''',
                SPIECE_UNDERLINE + '''in''',
                SPIECE_UNDERLINE + '''''',
                '''<unk>''',
                '''2''',
                '''0''',
                '''0''',
                '''0''',
                ''',''',
                SPIECE_UNDERLINE + '''and''',
                SPIECE_UNDERLINE + '''this''',
                SPIECE_UNDERLINE + '''is''',
                SPIECE_UNDERLINE + '''f''',
                '''al''',
                '''s''',
                '''<unk>''',
                '''.''',
            ] , )

    @cached_property
    def A ( self : List[str] ) -> Optional[int]:
        return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' )

    @slow
    def A ( self : str ) -> str:
        UpperCAmelCase_ : Tuple = '''Hello World!'''
        UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
        self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )

    @slow
    def A ( self : List[Any] ) -> str:
        UpperCAmelCase_ : Tuple = (
            '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
            ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
        )
        UpperCAmelCase_ : int = [
            1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91,
            2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58,
            2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99,
            2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49,
            26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65,
        ]
        self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )

    @require_torch
    @slow
    def A ( self : List[str] ) -> Optional[int]:
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10]
        UpperCAmelCase_ : List[Any] = ''' '''.join(_A )
        UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' )
        UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' )
        UpperCAmelCase_ : List[Any] = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape
        UpperCAmelCase_ : Optional[int] = ReformerModel(_A )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**_A )
            model(**_A )

    @slow
    def A ( self : int ) -> Optional[Any]:
        # fmt: off
        UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        UpperCAmelCase_ : Optional[Any] = [
            '''This is a very simple sentence.''',
            '''The quick brown fox jumps over the lazy dog.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
304
1
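
A minimal slow/fast parity check mirroring the tokenizer test above; a sketch only, assuming the public checkpoint can be downloaded:

from transformers import ReformerTokenizer, ReformerTokenizerFast

slow = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
fast = ReformerTokenizerFast.from_pretrained("google/reformer-crime-and-punishment")
text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)  # same subword split
assert slow.encode(text) == fast.encode(text)      # same token ids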
'''simple docstring'''

import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class snake_case__ ( unittest.TestCase):
    def A ( self : Optional[int] ) -> Dict:
        UpperCAmelCase_ : Dict = inspect.getfile(accelerate.test_utils )
        UpperCAmelCase_ : int = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        UpperCAmelCase_ : int = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        UpperCAmelCase_ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )

    @require_multi_gpu
    def A ( self : Optional[int] ) -> Optional[Any]:
        print(F"Found {torch.cuda.device_count()} devices." )
        UpperCAmelCase_ : Optional[Any] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_A , env=os.environ.copy() )

    @require_multi_gpu
    def A ( self : Dict ) -> List[str]:
        print(F"Found {torch.cuda.device_count()} devices." )
        UpperCAmelCase_ : Union[str, Any] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(F"Command: {cmd}" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_A , env=os.environ.copy() )

    @require_multi_gpu
    def A ( self : Any ) -> Tuple:
        UpperCAmelCase_ : Optional[Any] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(_A , env=os.environ.copy() )

    @require_multi_gpu
    def A ( self : Dict ) -> Optional[int]:
        print(F"Found {torch.cuda.device_count()} devices, using 2 devices only" )
        UpperCAmelCase_ : Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(_A , env=os.environ.copy() )


if __name__ == "__main__":
    _UpperCamelCase : Optional[Any] = Accelerator()
    _UpperCamelCase : str = (accelerator.state.process_index + 2, 10)
    _UpperCamelCase : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
    _UpperCamelCase : List[str] = ''
    _UpperCamelCase : Any = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    _UpperCamelCase : List[str] = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    _UpperCamelCase : Tuple = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
304
'''simple docstring'''

from __future__ import annotations


def __UpperCAmelCase ( A : str ) -> list[int]:
    return [ord(A ) - 9_6 for elem in plain]


def __UpperCAmelCase ( A : list[int] ) -> str:
    return "".join(chr(elem + 9_6 ) for elem in encoded )


def __UpperCAmelCase ( ) -> None:
    UpperCAmelCase_ : Tuple = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , A )
    print('''Decoded:''' , decode(A ) )


if __name__ == "__main__":
    main()
304
1
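
A standalone sketch of the letter <-> number mapping (a = 1, ..., z = 26) used by the encode/decode pair above:

plain = "hello"
encoded = [ord(ch) - 96 for ch in plain]      # ord('a') == 97, so 'a' -> 1
print(encoded)                                # [8, 5, 12, 12, 15]
print("".join(chr(n + 96) for n in encoded))  # "hello"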
'''simple docstring'''

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


_UpperCamelCase : List[str] = logging.get_logger(__name__)

# General docstring
_UpperCamelCase : str = 'RegNetConfig'

# Base docstring
_UpperCamelCase : List[Any] = 'facebook/regnet-y-040'
_UpperCamelCase : str = [1, 1_088, 7, 7]

# Image classification docstring
_UpperCamelCase : Tuple = 'facebook/regnet-y-040'
_UpperCamelCase : Union[str, Any] = 'tabby, tabby cat'

_UpperCamelCase : List[str] = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : List[Any] , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Optional[int] , ) -> List[str]:
        super().__init__(**_A )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        UpperCAmelCase_ : List[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        UpperCAmelCase_ : str = tf.keras.layers.ConvaD(
            filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
        UpperCAmelCase_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        UpperCAmelCase_ : List[str] = ACTaFN[activation] if activation is not None else tf.identity

    def A ( self : int , _A : Optional[int] ) -> List[str]:
        UpperCAmelCase_ : Tuple = self.convolution(self.padding(_A ) )
        UpperCAmelCase_ : Union[str, Any] = self.normalization(_A )
        UpperCAmelCase_ : int = self.activation(_A )
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : int , _A : RegNetConfig , **_A : Union[str, Any] ) -> Any:
        super().__init__(**_A )
        UpperCAmelCase_ : Tuple = config.num_channels
        UpperCAmelCase_ : List[Any] = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )

    def A ( self : Dict , _A : int ) -> Any:
        UpperCAmelCase_ : Union[str, Any] = shape_list(_A )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        UpperCAmelCase_ : Optional[Any] = tf.transpose(_A , perm=(0, 2, 3, 1) )
        UpperCAmelCase_ : int = self.embedder(_A )
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : Optional[int] , _A : int , _A : int = 2 , **_A : str ) -> Any:
        super().__init__(**_A )
        UpperCAmelCase_ : Dict = tf.keras.layers.ConvaD(
            filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
        UpperCAmelCase_ : str = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )

    def A ( self : Optional[Any] , _A : tf.Tensor , _A : bool = False ) -> tf.Tensor:
        return self.normalization(self.convolution(_A ) , training=_A )


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : Union[str, Any] , _A : int , _A : int , **_A : str ) -> Optional[Any]:
        super().__init__(**_A )
        UpperCAmelCase_ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
        UpperCAmelCase_ : str = [
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]

    def A ( self : int , _A : List[Any] ) -> Optional[Any]:
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        UpperCAmelCase_ : Union[str, Any] = self.pooler(_A )
        for layer_module in self.attention:
            UpperCAmelCase_ : List[Any] = layer_module(_A )
        UpperCAmelCase_ : str = hidden_state * pooled
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : int , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : Tuple ) -> Union[str, Any]:
        super().__init__(**_A )
        UpperCAmelCase_ : int = in_channels != out_channels or stride != 1
        UpperCAmelCase_ : str = max(1 , out_channels // config.groups_width )
        UpperCAmelCase_ : str = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        UpperCAmelCase_ : List[Any] = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
        ]
        UpperCAmelCase_ : str = ACTaFN[config.hidden_act]

    def A ( self : str , _A : Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : int = hidden_state
        for layer_module in self.layers:
            UpperCAmelCase_ : Tuple = layer_module(_A )
        UpperCAmelCase_ : Tuple = self.shortcut(_A )
        hidden_state += residual
        UpperCAmelCase_ : int = self.activation(_A )
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : str , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : Dict ) -> Any:
        super().__init__(**_A )
        UpperCAmelCase_ : Union[str, Any] = in_channels != out_channels or stride != 1
        UpperCAmelCase_ : str = max(1 , out_channels // config.groups_width )
        UpperCAmelCase_ : List[str] = (
            TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        UpperCAmelCase_ : Any = [
            TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                _A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
        ]
        UpperCAmelCase_ : List[str] = ACTaFN[config.hidden_act]

    def A ( self : Optional[Any] , _A : int ) -> List[Any]:
        UpperCAmelCase_ : Dict = hidden_state
        for layer_module in self.layers:
            UpperCAmelCase_ : Dict = layer_module(_A )
        UpperCAmelCase_ : List[Any] = self.shortcut(_A )
        hidden_state += residual
        UpperCAmelCase_ : Any = self.activation(_A )
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Dict ) -> Union[str, Any]:
        super().__init__(**_A )
        UpperCAmelCase_ : Dict = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        UpperCAmelCase_ : Tuple = [
            # downsampling is done in the first layer with stride of 2
            layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
            *[layer(_A , _A , _A , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
        ]

    def A ( self : Optional[int] , _A : List[str] ) -> Optional[int]:
        for layer_module in self.layers:
            UpperCAmelCase_ : Optional[Any] = layer_module(_A )
        return hidden_state


class snake_case__ ( tf.keras.layers.Layer):
    def __init__( self : Union[str, Any] , _A : RegNetConfig , **_A : Optional[int] ) -> Optional[int]:
        super().__init__(**_A )
        UpperCAmelCase_ : Dict = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                _A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        UpperCAmelCase_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"stages.{i+1}" ) )

    def A ( self : Dict , _A : tf.Tensor , _A : bool = False , _A : bool = True ) -> TFBaseModelOutputWithNoAttention:
        UpperCAmelCase_ : int = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                UpperCAmelCase_ : Tuple = hidden_states + (hidden_state,)
            UpperCAmelCase_ : Union[str, Any] = stage_module(_A )
        if output_hidden_states:
            UpperCAmelCase_ : Union[str, Any] = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )


@keras_serializable
class snake_case__ ( tf.keras.layers.Layer):
    a_ = RegNetConfig

    def __init__( self : Tuple , _A : Any , **_A : Union[str, Any] ) -> Tuple:
        super().__init__(**_A )
        UpperCAmelCase_ : Dict = config
        UpperCAmelCase_ : Optional[Any] = TFRegNetEmbeddings(_A , name='''embedder''' )
        UpperCAmelCase_ : Any = TFRegNetEncoder(_A , name='''encoder''' )
        UpperCAmelCase_ : Dict = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )

    @unpack_inputs
    def A ( self : int , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        UpperCAmelCase_ : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase_ : List[str] = self.embedder(_A , training=_A )
        UpperCAmelCase_ : int = self.encoder(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )
        UpperCAmelCase_ : Tuple = encoder_outputs[0]
        UpperCAmelCase_ : str = self.pooler(_A )
        # Change to NCHW output format have uniformity in the modules
        UpperCAmelCase_ : str = tf.transpose(_A , perm=(0, 3, 1, 2) )
        UpperCAmelCase_ : Dict = tf.transpose(_A , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            UpperCAmelCase_ : int = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )


class snake_case__ ( UpperCamelCase):
    a_ = RegNetConfig
    a_ = "regnet"
    a_ = "pixel_values"

    @property
    def A ( self : Optional[Any] ) -> Dict:
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}


_UpperCamelCase : Union[str, Any] = R'\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'

_UpperCamelCase : int = R'\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , UpperCamelCase , )
class snake_case__ ( UpperCamelCase):
    def __init__( self : Union[str, Any] , _A : RegNetConfig , *_A : int , **_A : List[str] ) -> List[str]:
        super().__init__(_A , *_A , **_A )
        UpperCAmelCase_ : Any = TFRegNetMainLayer(_A , name='''regnet''' )

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def A ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Dict=False , ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        UpperCAmelCase_ : Union[str, Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase_ : str = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase_ : Union[str, Any] = self.regnet(
            pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )


@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , UpperCamelCase , )
class snake_case__ ( UpperCamelCase , UpperCamelCase):
    def __init__( self : List[str] , _A : RegNetConfig , *_A : List[Any] , **_A : List[Any] ) -> Any:
        super().__init__(_A , *_A , **_A )
        UpperCAmelCase_ : Union[str, Any] = config.num_labels
        UpperCAmelCase_ : Dict = TFRegNetMainLayer(_A , name='''regnet''' )
        # classification head
        UpperCAmelCase_ : Any = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(_A )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def A ( self : int , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : List[Any]=False , ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        UpperCAmelCase_ : Dict = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCAmelCase_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCAmelCase_ : str = self.regnet(
            _A , output_hidden_states=_A , return_dict=_A , training=_A )
        UpperCAmelCase_ : int = outputs.pooler_output if return_dict else outputs[1]
        UpperCAmelCase_ : Union[str, Any] = self.classifier[0](_A )
        UpperCAmelCase_ : List[Any] = self.classifier[1](_A )
        UpperCAmelCase_ : Optional[int] = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
        if not return_dict:
            UpperCAmelCase_ : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
304
'''simple docstring'''

from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
304
1
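
A standalone illustration of the NCHW -> NHWC transpose done in the RegNet embedder above (Keras Conv2D on CPU expects channels-last):

import tensorflow as tf

x = tf.zeros((2, 3, 224, 224))          # (batch, channels, height, width)
x = tf.transpose(x, perm=(0, 2, 3, 1))  # (batch, height, width, channels)
print(x.shape)                          # (2, 224, 224, 3)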
'''simple docstring'''

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConformerConfig,
    WavaVecaConformerForCTC,
    WavaVecaConformerForPreTraining,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
_UpperCamelCase : Tuple = logging.get_logger(__name__)

_UpperCamelCase : Dict = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.linear_k': 'encoder.layers.*.self_attn.linear_k',
    'self_attn.linear_v': 'encoder.layers.*.self_attn.linear_v',
    'self_attn.linear_q': 'encoder.layers.*.self_attn.linear_q',
    'self_attn.pos_bias_u': 'encoder.layers.*.self_attn.pos_bias_u',
    'self_attn.pos_bias_v': 'encoder.layers.*.self_attn.pos_bias_v',
    'self_attn.linear_out': 'encoder.layers.*.self_attn.linear_out',
    'self_attn.linear_pos': 'encoder.layers.*.self_attn.linear_pos',
    'self_attn.rotary_emb': 'encoder.embed_positions',
    'self_attn_layer_norm': 'encoder.layers.*.self_attn_layer_norm',
    'conv_module.pointwise_conv1': 'encoder.layers.*.conv_module.pointwise_conv1',
    'conv_module.pointwise_conv2': 'encoder.layers.*.conv_module.pointwise_conv2',
    'conv_module.depthwise_conv': 'encoder.layers.*.conv_module.depthwise_conv',
    'conv_module.batch_norm': 'encoder.layers.*.conv_module.batch_norm',
    'conv_module.layer_norm': 'encoder.layers.*.conv_module.layer_norm',
    'ffn1.w_1': 'encoder.layers.*.ffn1.intermediate_dense',
    'ffn1.w_2': 'encoder.layers.*.ffn1.output_dense',
    'ffn1.layer_norm': 'encoder.layers.*.ffn1_layer_norm',
    'ffn2.w_1': 'encoder.layers.*.ffn2.intermediate_dense',
    'ffn2.w_2': 'encoder.layers.*.ffn2.output_dense',
    'ffn2.layer_norm': 'encoder.layers.*.ffn2_layer_norm',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
_UpperCamelCase : Tuple = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def __UpperCAmelCase ( A : Optional[Any] , A : List[str] , A : Tuple , A : Union[str, Any] , A : str ) -> Any:
    for attribute in key.split('''.''' ):
        UpperCAmelCase_ : str = getattr(A , A )
    if weight_type is not None:
        UpperCAmelCase_ : Optional[int] = getattr(A , A ).shape
    else:
        UpperCAmelCase_ : List[Any] = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        UpperCAmelCase_ : Any = value
    elif weight_type == "weight_g":
        UpperCAmelCase_ : str = value
    elif weight_type == "weight_v":
        UpperCAmelCase_ : int = value
    elif weight_type == "bias":
        UpperCAmelCase_ : Union[str, Any] = value
    elif weight_type == "running_mean":
        UpperCAmelCase_ : int = value
    elif weight_type == "running_var":
        UpperCAmelCase_ : List[Any] = value
    elif weight_type == "num_batches_tracked":
        UpperCAmelCase_ : List[str] = value
    elif weight_type == "inv_freq":
        UpperCAmelCase_ : List[str] = value
    else:
        UpperCAmelCase_ : Dict = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )


def __UpperCAmelCase ( A : Optional[Any] , A : Optional[int] , A : Optional[int] ) -> Union[str, Any]:
    UpperCAmelCase_ : Dict = []
    UpperCAmelCase_ : List[str] = fairseq_model.state_dict()
    UpperCAmelCase_ : Any = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        UpperCAmelCase_ : List[str] = False
        if "conv_layers" in name:
            load_conv_layer(
                A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
            UpperCAmelCase_ : Any = True
        else:
            for key, mapped_key in MAPPING.items():
                UpperCAmelCase_ : str = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    UpperCAmelCase_ : Union[str, Any] = True
                    if "*" in mapped_key:
                        UpperCAmelCase_ : List[str] = name.split(A )[0].split('''.''' )[-2]
                        UpperCAmelCase_ : Tuple = mapped_key.replace('''*''' , A )
                    if "pos_bias_u" in name:
                        UpperCAmelCase_ : Optional[int] = None
                    elif "pos_bias_v" in name:
                        UpperCAmelCase_ : Tuple = None
                    elif "weight_g" in name:
                        UpperCAmelCase_ : int = '''weight_g'''
                    elif "weight_v" in name:
                        UpperCAmelCase_ : Tuple = '''weight_v'''
                    elif "bias" in name:
                        UpperCAmelCase_ : str = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        UpperCAmelCase_ : int = '''weight'''
                    elif "running_mean" in name:
                        UpperCAmelCase_ : int = '''running_mean'''
                    elif "inv_freq" in name:
                        UpperCAmelCase_ : Optional[Any] = '''inv_freq'''
                    elif "running_var" in name:
                        UpperCAmelCase_ : int = '''running_var'''
                    elif "num_batches_tracked" in name:
                        UpperCAmelCase_ : str = '''num_batches_tracked'''
                    else:
                        UpperCAmelCase_ : int = None
                    set_recursively(A , A , A , A , A )
                continue
            if not is_used:
                unused_weights.append(A )
    logger.warning(F"Unused weights: {unused_weights}" )


def __UpperCAmelCase ( A : Dict , A : Optional[int] , A : str , A : Any , A : int ) -> Union[str, Any]:
    UpperCAmelCase_ : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
    UpperCAmelCase_ : Union[str, Any] = name.split('''.''' )
    UpperCAmelCase_ : Union[str, Any] = int(items[0] )
    UpperCAmelCase_ : str = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            UpperCAmelCase_ : Tuple = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            UpperCAmelCase_ : str = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            UpperCAmelCase_ : Optional[int] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            UpperCAmelCase_ : Dict = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(A )


@torch.no_grad()
def __UpperCAmelCase ( A : Dict , A : List[str] , A : Optional[int]=None , A : List[str]=None , A : int=True ) -> int:
    if config_path is not None:
        UpperCAmelCase_ : List[str] = WavaVecaConformerConfig.from_pretrained(A , hidden_act='''swish''' )
    else:
        UpperCAmelCase_ : Optional[int] = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        UpperCAmelCase_ : List[Any] = '''rotary'''
    if is_finetuned:
        if dict_path:
            UpperCAmelCase_ : List[str] = Dictionary.load(A )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            UpperCAmelCase_ : Optional[Any] = target_dict.pad_index
            UpperCAmelCase_ : Optional[Any] = target_dict.bos_index
            UpperCAmelCase_ : List[str] = target_dict.eos_index
            UpperCAmelCase_ : List[Any] = len(target_dict.symbols )
            UpperCAmelCase_ : Optional[int] = os.path.join(A , '''vocab.json''' )
            if not os.path.isdir(A ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A ) )
                return
            os.makedirs(A , exist_ok=A )
            UpperCAmelCase_ : Optional[Any] = target_dict.indices
            # fairseq has the <pad> and <s> switched
            UpperCAmelCase_ : List[str] = 0
            UpperCAmelCase_ : Optional[Any] = 1
            with open(A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(A , A )
            UpperCAmelCase_ : Optional[Any] = WavaVecaCTCTokenizer(
                A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A , )
            UpperCAmelCase_ : List[Any] = True if config.feat_extract_norm == '''layer''' else False
            UpperCAmelCase_ : List[Any] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
            UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
            processor.save_pretrained(A )
        UpperCAmelCase_ : Any = WavaVecaConformerForCTC(A )
    else:
        UpperCAmelCase_ : str = WavaVecaConformerForPreTraining(A )
    if is_finetuned:
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        UpperCAmelCase_ : List[str] = argparse.Namespace(task='''audio_pretraining''' )
        UpperCAmelCase_ : Tuple = fairseq.tasks.setup_task(A )
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
    UpperCAmelCase_ : Optional[int] = model[0].eval()
    recursively_load_weights(A , A , not is_finetuned )
    hf_wavavec.save_pretrained(A )


if __name__ == "__main__":
    _UpperCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    _UpperCamelCase : List[Any] = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
304
'''simple docstring'''

def __UpperCAmelCase ( A : int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    UpperCAmelCase_ : int = gray_code_sequence_string(A )
    #
    # convert them to integers
    for i in range(len(A ) ):
        UpperCAmelCase_ : List[str] = int(sequence[i] , 2 )
    return sequence


def __UpperCAmelCase ( A : int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    UpperCAmelCase_ : Tuple = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    UpperCAmelCase_ : List[str] = gray_code_sequence_string(bit_count - 1 )
    UpperCAmelCase_ : int = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        UpperCAmelCase_ : Union[str, Any] = '''0''' + smaller_sequence[i]
        sequence.append(A )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        UpperCAmelCase_ : Dict = '''1''' + smaller_sequence[i]
        sequence.append(A )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
304
1
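
A worked example of the reflect-and-prefix construction in the Gray-code snippet above, for bit_count = 2:

# 1-bit sequence: ["0", "1"]
# prefix "0" to the first half:     ["00", "01"]
# prefix "1" to the reversed half:  ["11", "10"]
# result: ["00", "01", "11", "10"] -> integers [0, 1, 3, 2]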
'''simple docstring'''

import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class snake_case__ :
    def __init__( self : Optional[Any] , _A : List[Any]=2 , _A : Tuple=3 , _A : Union[str, Any]=64 , _A : int=None ) -> Any:
        UpperCAmelCase_ : Any = np.random.default_rng(_A )
        UpperCAmelCase_ : List[str] = length
        UpperCAmelCase_ : List[str] = rng.normal(size=(length,) ).astype(np.floataa )
        UpperCAmelCase_ : Optional[int] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )

    def __len__( self : Union[str, Any] ) -> Union[str, Any]:
        return self.length

    def __getitem__( self : List[Any] , _A : Optional[Any] ) -> List[Any]:
        return {"x": self.x[i], "y": self.y[i]}


class snake_case__ ( torch.nn.Module):
    def __init__( self : Union[str, Any] , _A : Optional[int]=0 , _A : List[str]=0 , _A : Optional[Any]=False ) -> List[str]:
        super().__init__()
        UpperCAmelCase_ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        UpperCAmelCase_ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        UpperCAmelCase_ : str = True

    def A ( self : List[str] , _A : List[Any]=None ) -> Optional[Any]:
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            UpperCAmelCase_ : str = False
        return x * self.a[0] + self.b[0]


class snake_case__ ( torch.nn.Module):
    def __init__( self : List[Any] , _A : Union[str, Any]=0 , _A : Optional[int]=0 , _A : Union[str, Any]=False ) -> str:
        super().__init__()
        UpperCAmelCase_ : str = torch.nn.Parameter(torch.tensor(_A ).float() )
        UpperCAmelCase_ : str = torch.nn.Parameter(torch.tensor(_A ).float() )
        UpperCAmelCase_ : List[Any] = True

    def A ( self : Optional[Any] , _A : List[str]=None ) -> Tuple:
        if self.first_batch:
            print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
            UpperCAmelCase_ : Optional[int] = False
        return x * self.a + self.b


def __UpperCAmelCase ( A : List[Any] , A : int = 1_6 ) -> Tuple:
    from datasets import load_dataset
    from transformers import AutoTokenizer

    UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    UpperCAmelCase_ : Union[str, Any] = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    UpperCAmelCase_ : List[Any] = load_dataset('''csv''' , data_files=A )
    UpperCAmelCase_ : Dict = datasets['''train'''].unique('''label''' )
    UpperCAmelCase_ : Dict = {v: i for i, v in enumerate(A )}

    def tokenize_function(A : Dict ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase_ : Any = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A , padding='''max_length''' )
        if "label" in examples:
            UpperCAmelCase_ : Any = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    UpperCAmelCase_ : Tuple = datasets.map(
        A , batched=A , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )

    def collate_fn(A : Dict ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(A , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
        return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' )

    # Instantiate dataloaders.
    UpperCAmelCase_ : List[str] = DataLoader(tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=2 )
    UpperCAmelCase_ : str = DataLoader(tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=1 )
    return train_dataloader, eval_dataloader
304
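The collate_fn above switches between static padding (stable tensor shapes, which TPUs need to avoid recompilation) and dynamic padding (less wasted compute elsewhere). A minimal, self-contained sketch of that trade-off; the checkpoint name mirrors the snippet above:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
batch = ["a short sentence", "a noticeably longer sentence that forces extra padding"]

static = tokenizer(batch, padding="max_length", max_length=128, return_tensors="pt")
dynamic = tokenizer(batch, padding="longest", return_tensors="pt")

print(static["input_ids"].shape)   # torch.Size([2, 128]) — fixed shape for TPU
print(dynamic["input_ids"].shape)  # e.g. torch.Size([2, 11]) — padded to the longest item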
'''simple docstring'''

import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
304
1
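Continuing from the config definition above, a short usage sketch; note that MaskedBert ships with the movement-pruning research example rather than the transformers core, so any import path for it is context-dependent.

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.hidden_size, config.num_hidden_layers, config.pruning_method)  # 768 12 topK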
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _UpperCamelCase : Optional[Any] = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { 'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class snake_case__ ( UpperCamelCase): a_ = "deformable_detr" a_ = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : Union[str, Any] , _A : int=True , _A : Optional[Any]=None , _A : str=3 , _A : Dict=3_00 , _A : Dict=10_24 , _A : Any=6 , _A : List[Any]=10_24 , _A : Tuple=8 , _A : List[Any]=6 , _A : Dict=10_24 , _A : Any=8 , _A : Optional[Any]=0.0 , _A : List[Any]=True , _A : Any="relu" , _A : int=2_56 , _A : Optional[int]=0.1 , _A : List[str]=0.0 , _A : int=0.0 , _A : Tuple=0.02 , _A : Any=1.0 , _A : Any=True , _A : List[str]=False , _A : Optional[Any]="sine" , _A : List[str]="resnet50" , _A : str=True , _A : str=False , _A : Dict=4 , _A : List[Any]=4 , _A : Union[str, Any]=4 , _A : List[Any]=False , _A : Tuple=3_00 , _A : Any=False , _A : List[Any]=1 , _A : List[str]=5 , _A : Optional[int]=2 , _A : Optional[Any]=1 , _A : int=1 , _A : Dict=5 , _A : int=2 , _A : int=0.1 , _A : Tuple=0.25 , _A : int=False , **_A : List[str] , ) -> Optional[int]: if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) UpperCAmelCase_ : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(_A , _A ): UpperCAmelCase_ : Optional[Any] = backbone_config.get('''model_type''' ) UpperCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Optional[Any] = config_class.from_dict(_A ) UpperCAmelCase_ : int = use_timm_backbone UpperCAmelCase_ : List[Any] = backbone_config UpperCAmelCase_ : int = num_channels UpperCAmelCase_ : Union[str, Any] = num_queries UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : Optional[int] = d_model UpperCAmelCase_ : Dict = encoder_ffn_dim UpperCAmelCase_ : List[str] = encoder_layers UpperCAmelCase_ : List[Any] = encoder_attention_heads UpperCAmelCase_ : Optional[int] = decoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = decoder_layers UpperCAmelCase_ : Tuple = decoder_attention_heads UpperCAmelCase_ : Tuple = dropout UpperCAmelCase_ : int = attention_dropout UpperCAmelCase_ : Union[str, Any] = activation_dropout UpperCAmelCase_ : int = activation_function UpperCAmelCase_ : List[str] = init_std UpperCAmelCase_ : List[str] = init_xavier_std UpperCAmelCase_ : List[Any] = encoder_layerdrop UpperCAmelCase_ : Optional[int] = auxiliary_loss UpperCAmelCase_ : Dict = position_embedding_type UpperCAmelCase_ : List[Any] = backbone UpperCAmelCase_ : Any = use_pretrained_backbone UpperCAmelCase_ : List[str] = dilation # deformable attributes UpperCAmelCase_ : Dict = num_feature_levels UpperCAmelCase_ : List[Any] = encoder_n_points UpperCAmelCase_ : List[str] = decoder_n_points UpperCAmelCase_ : Optional[Any] = two_stage UpperCAmelCase_ : Dict = two_stage_num_proposals UpperCAmelCase_ : Union[str, Any] = with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage 
is True, with_box_refine must be True.''' ) # Hungarian matcher UpperCAmelCase_ : Any = class_cost UpperCAmelCase_ : Optional[Any] = bbox_cost UpperCAmelCase_ : List[str] = giou_cost # Loss coefficients UpperCAmelCase_ : Any = mask_loss_coefficient UpperCAmelCase_ : Optional[Any] = dice_loss_coefficient UpperCAmelCase_ : Dict = bbox_loss_coefficient UpperCAmelCase_ : Tuple = giou_loss_coefficient UpperCAmelCase_ : Dict = eos_coefficient UpperCAmelCase_ : List[Any] = focal_alpha UpperCAmelCase_ : Optional[Any] = disable_custom_kernels super().__init__(is_encoder_decoder=_A , **_A ) @property def A ( self : Any ) -> int: return self.encoder_attention_heads @property def A ( self : Union[str, Any] ) -> int: return self.d_model def A ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: UpperCAmelCase_ : List[str] = self.backbone_config.to_dict() UpperCAmelCase_ : List[Any] = self.__class__.model_type return output
304
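A usage sketch for the config above: the constructor rejects a two-stage setup without box refinement, which is easy to verify directly.

from transformers import DeformableDetrConfig

config = DeformableDetrConfig()  # defaults: d_model=256, num_feature_levels=4
print(config.d_model, config.num_feature_levels)

try:
    DeformableDetrConfig(two_stage=True, with_box_refine=False)
except ValueError as err:
    print(err)  # "If two_stage is True, with_box_refine must be True."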
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = StableDiffusionDiffEditPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} a_ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([]) def A ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : str , _A : List[str] , _A : Any=0 ) -> str: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Any = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : str = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, 
'''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Dict = torch.manual_seed(_A ) else: UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[Any] = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any: UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[int] = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def A ( self : List[str] ) -> Optional[Any]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Any = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase_ : str = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0] UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(_A , 1e-4 ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : Optional[Any] = '''cpu''' UpperCAmelCase_ : Any = self.get_dummy_components() UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A ) UpperCAmelCase_ : int = pipe.generate_mask(**_A ) UpperCAmelCase_ : Tuple = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase_ : List[Any] = np.array([0] * 9 ) UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = '''cpu''' UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : int = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) def A ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : Any = '''cpu''' UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A ) UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A ) UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase): def A ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) ) UpperCAmelCase_ : Any = raw_image def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Tuple = '''a bowl of pears''' UpperCAmelCase_ : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[str] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents UpperCAmelCase_ : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Dict = '''a bowl of pears''' UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents UpperCAmelCase_ : Dict = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
304
1
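The tests above exercise DiffEdit's three stages; a condensed end-to-end sketch follows. The checkpoint, image URL, and prompts mirror the slow test, and a CUDA-capable machine is assumed.

import torch
from diffusers import StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((768, 768))

# 1) find where the prompts disagree, 2) invert the image, 3) regenerate inside the mask
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]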
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_jukebox': [
        'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'JukeboxConfig',
        'JukeboxPriorConfig',
        'JukeboxVQVAEConfig',
    ],
    'tokenization_jukebox': ['JukeboxTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_jukebox'] = [
        'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'JukeboxModel',
        'JukeboxPreTrainedModel',
        'JukeboxVQVAE',
        'JukeboxPrior',
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
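The jukebox __init__ above relies on _LazyModule to defer heavy imports until an attribute is first touched. A generic, illustrative reimplementation of that idea (not the transformers class itself):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [attrs]} into {attr: submodule}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)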
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ ( UpperCamelCase): def A ( self : List[str] ) -> List[Any]: UpperCAmelCase_ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(_A , '''num_heads''' ) ) class snake_case__ : def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]: UpperCAmelCase_ : int = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Tuple = patch_sizes UpperCAmelCase_ : int = patch_stride UpperCAmelCase_ : Any = patch_padding UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = num_labels UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Optional[int] = num_heads UpperCAmelCase_ : Tuple = stride_kv UpperCAmelCase_ : Optional[Any] = depth UpperCAmelCase_ : Dict = cls_token UpperCAmelCase_ : Dict = attention_drop_rate UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : List[str] = layer_norm_eps def A ( self : int ) -> List[str]: UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : List[str] = self.get_config() return config, pixel_values, labels def A ( self : List[str] ) -> int: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]: UpperCAmelCase_ : List[Any] = CvtModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Tuple = model(_A ) UpperCAmelCase_ : List[str] = (self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - 
self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : str = CvtForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : int = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Dict ) -> Any: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else () a_ = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False a_ = False def A ( self : int ) -> List[str]: UpperCAmelCase_ : Optional[int] = CvtModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def A ( self : Any ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : int ) -> List[str]: return @unittest.skip(reason='''Cvt does not output attentions''' ) def A ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def A ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def A ( self : List[Any] ) -> Any: pass def A ( self : int ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(_A ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Tuple = [*signature.parameters.keys()] UpperCAmelCase_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def A ( self : Dict ) -> List[str]: def check_hidden_states_output(_A : Dict , _A : str , _A : int ): UpperCAmelCase_ : str = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase_ : Optional[Any] = outputs.hidden_states UpperCAmelCase_ : Any = len(self.model_tester.depth ) self.assertEqual(len(_A ) , _A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, 
] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Dict = True check_hidden_states_output(_A , _A , _A ) def A ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A ( self : List[Any] ) -> Optional[Any]: pass @slow def A ( self : Optional[int] ) -> int: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case__ ( unittest.TestCase): @cached_property def A ( self : Union[str, Any] ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A ) UpperCAmelCase_ : Optional[int] = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Any = model(**_A ) # verify the logits UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
304
1
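The height/width assertions in the CvT test above follow the standard convolution output-size formula. A standalone check using the tester's stage parameters (stage 0: 64x64 input, 7x7 patches, stride 4, padding 2; stage 1: 3x3 patches, stride 2, padding 1):

from math import floor


def conv_output_size(size: int, kernel: int, stride: int, padding: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1


assert conv_output_size(64, kernel=7, stride=4, padding=2) == 16
assert conv_output_size(16, kernel=3, stride=2, padding=1) == 8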
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
304
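A usage sketch for the config above, showing the eager router_dtype validation; the values passed are illustrative, and NllbMoeConfig requires a recent transformers version.

from transformers import NllbMoeConfig

config = NllbMoeConfig(num_experts=8, expert_capacity=32)
print(config.router_dtype)  # "float32"

try:
    NllbMoeConfig(router_dtype="int8")
except ValueError as err:
    print(err)  # `router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got int8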
'''simple docstring'''

from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
304
1
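A small sketch of what the template above buys you: column_mapping tells datasets how to rename arbitrary columns into the schema the task expects. The dataset and column names here are illustrative.

from datasets import Dataset

dataset = Dataset.from_dict({"content": ["hello world", "lorem ipsum"]})
mapping = {"content": "text"}  # what column_mapping yields for text_column="content"
for source, target in mapping.items():
    dataset = dataset.rename_column(source, target)
print(dataset.column_names)  # ['text']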
'''simple docstring'''

from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class Stack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('''pop from empty stack''')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('''peek from empty stack''')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
304
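Quick usage of the stack above (LIFO order; iteration runs from top to bottom), assuming the class names used there:

stack = Stack[int]()
for value in (1, 2, 3):
    stack.push(value)

print(stack)         # 3->2->1
print(stack.pop())   # 3
print(stack.peek())  # 2
print(len(stack))    # 2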
'''simple docstring''' import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict: with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f: UpperCAmelCase_ : Union[str, Any] = json.load(A ) UpperCAmelCase_ : Optional[int] = {} UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : str = [] for key, info in class_info.items(): UpperCAmelCase_ : Tuple = info['''name'''] class_names.append(info['''name'''] ) if info["isthing"]: thing_ids.append(int(A ) ) UpperCAmelCase_ : Any = thing_ids UpperCAmelCase_ : Union[str, Any] = class_names return metadata class snake_case__ ( unittest.TestCase): def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Optional[int] = max_resolution UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size UpperCAmelCase_ : int = do_normalize UpperCAmelCase_ : List[Any] = image_mean UpperCAmelCase_ : Dict = image_std UpperCAmelCase_ : str = class_info_file UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A ) UpperCAmelCase_ : Tuple = num_text UpperCAmelCase_ : Union[str, Any] = repo_path # for the post_process_functions UpperCAmelCase_ : Any = 2 UpperCAmelCase_ : Dict = 10 UpperCAmelCase_ : int = 10 UpperCAmelCase_ : Optional[Any] = 3 UpperCAmelCase_ : str = 4 UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Union[str, Any] = do_reduce_labels UpperCAmelCase_ : str = ignore_index def A ( self : Dict ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]: if not batched: UpperCAmelCase_ : Any = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size else: UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase_ : int = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase_ : 
List[Any] = self.size['''shortest_edge'''] UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase_ : Dict = self.size['''shortest_edge'''] UpperCAmelCase_ : str = self.size['''shortest_edge'''] else: UpperCAmelCase_ : Dict = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width def A ( self : Tuple ) -> str: return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string a_ = image_processing_class def A ( self : Optional[int] ) -> Any: UpperCAmelCase_ : int = OneFormerImageProcessorTester(self ) @property def A ( self : Any ) -> int: return self.image_processing_tester.prepare_image_processor_dict() def A ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''ignore_index''' ) ) self.assertTrue(hasattr(_A , '''class_info_file''' ) ) self.assertTrue(hasattr(_A , '''num_text''' ) ) self.assertTrue(hasattr(_A , '''repo_path''' ) ) self.assertTrue(hasattr(_A , '''metadata''' ) ) self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) ) def A ( self : Dict ) -> Dict: pass def A ( self : Tuple ) -> Dict: # Initialize image_processor UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : int = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : Tuple ) -> Tuple: # Initialize image_processor UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , 
np.ndarray ) # Test not batched input UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : Tuple = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : Dict ) -> Union[str, Any]: # Initialize image_processor UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase_ : Optional[int] = image_processor( _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str: UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # prepare image and target UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels UpperCAmelCase_ : int = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A ) if with_segmentation_maps: UpperCAmelCase_ : Any = num_labels if is_instance_map: UpperCAmelCase_ : Any = list(range(_A ) ) * 2 UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) ) UpperCAmelCase_ : Dict = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations] UpperCAmelCase_ : Tuple = image_processor( _A , ['''semantic'''] * len(_A ) , _A , return_tensors='''pt''' , instance_id_to_semantic_id=_A , pad_and_return_pixel_mask=_A , ) return inputs def A ( self : int ) -> str: pass def A ( self : Tuple ) -> Union[str, Any]: def common(_A : Optional[int]=False , _A : str=None ): UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs( with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A ) UpperCAmelCase_ : List[Any] = inputs['''mask_labels'''] UpperCAmelCase_ : Optional[Any] = inputs['''class_labels'''] UpperCAmelCase_ : int = inputs['''pixel_values'''] UpperCAmelCase_ : Tuple = inputs['''text_inputs'''] # check the batch_size for mask_label, class_label, 
text_input in zip(_A , _A , _A ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_A ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_A ) common(is_instance_map=_A , segmentation_type='''pil''' ) common(is_instance_map=_A , segmentation_type='''pil''' ) def A ( self : List[Any] ) -> List[Any]: UpperCAmelCase_ : int = np.zeros((20, 50) ) UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A ) self.assertEqual(len(_A ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def A ( self : Any ) -> List[Any]: UpperCAmelCase_ : int = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : Union[str, Any] = fature_extractor.post_process_semantic_segmentation(_A ) self.assertEqual(len(_A ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )] UpperCAmelCase_ : Any = fature_extractor.post_process_semantic_segmentation(_A , target_sizes=_A ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def A ( self : Optional[Any] ) -> Tuple: UpperCAmelCase_ : Any = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 ) self.assertTrue(len(_A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , _A ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def A ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='''ade20k_panoptic.json''' , num_text=self.image_processing_tester.num_text , repo_path='''shi-labs/oneformer_demo''' , ) UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs() UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 ) self.assertTrue(len(_A ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('''segmentation''' in el ) self.assertTrue('''segments_info''' in el ) self.assertEqual(type(el['''segments_info'''] ) , _A ) self.assertEqual( el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
304
1
'''simple docstring''' import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : int = logging.get_logger(__name__) set_seed(770) _UpperCamelCase : Optional[int] = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } _UpperCamelCase : Optional[Any] = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } _UpperCamelCase : Optional[Any] = os.path.dirname(os.path.abspath(__file__)) _UpperCamelCase : Dict = os.path.join(os.path.expanduser('~'), '.cache') _UpperCamelCase : Optional[int] = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0') def __UpperCAmelCase ( A : Optional[int] , A : Any=False ) -> List[Any]: UpperCAmelCase_ : List[Any] = model_type if use_small: key += "_small" return os.path.join(A , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def __UpperCAmelCase ( A : int , A : Tuple ) -> Union[str, Any]: os.makedirs(A , exist_ok=A ) hf_hub_download(repo_id=A , filename=A , local_dir=A ) def __UpperCAmelCase ( A : Optional[Any] , A : Union[str, Any] , A : Optional[int]=False , A : List[Any]="text" ) -> Optional[int]: if model_type == "text": UpperCAmelCase_ : Any = BarkSemanticModel UpperCAmelCase_ : Tuple = BarkSemanticConfig UpperCAmelCase_ : Optional[int] = BarkSemanticGenerationConfig elif model_type == "coarse": UpperCAmelCase_ : Optional[Any] = BarkCoarseModel UpperCAmelCase_ : int = BarkCoarseConfig UpperCAmelCase_ : Optional[int] = BarkCoarseGenerationConfig elif model_type == "fine": UpperCAmelCase_ : List[str] = BarkFineModel UpperCAmelCase_ : Dict = BarkFineConfig UpperCAmelCase_ : List[Any] = BarkFineGenerationConfig else: raise NotImplementedError() UpperCAmelCase_ : int = F"{model_type}_small" if use_small else model_type UpperCAmelCase_ : Optional[int] = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(A ): logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." 
) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) UpperCAmelCase_ : List[Any] = torch.load(A , map_location=A ) # this is a hack UpperCAmelCase_ : Tuple = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: UpperCAmelCase_ : Optional[int] = model_args['''vocab_size'''] UpperCAmelCase_ : Tuple = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments UpperCAmelCase_ : Optional[Any] = model_args.pop('''n_head''' ) UpperCAmelCase_ : Tuple = model_args.pop('''n_embd''' ) UpperCAmelCase_ : str = model_args.pop('''n_layer''' ) UpperCAmelCase_ : Dict = ConfigClass(**checkpoint['''model_args'''] ) UpperCAmelCase_ : Dict = ModelClass(config=A ) UpperCAmelCase_ : Dict = GenerationConfigClass() UpperCAmelCase_ : List[str] = model_generation_config UpperCAmelCase_ : int = checkpoint['''model'''] # fixup checkpoint UpperCAmelCase_ : Any = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(A ): # replace part of the key with corresponding layer name in HF implementation UpperCAmelCase_ : Any = k[len(A ) :] for old_layer_name in new_layer_name_dict: UpperCAmelCase_ : Union[str, Any] = new_k.replace(A , new_layer_name_dict[old_layer_name] ) UpperCAmelCase_ : Any = state_dict.pop(A ) UpperCAmelCase_ : List[Any] = set(state_dict.keys() ) - set(model.state_dict().keys() ) UpperCAmelCase_ : int = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} UpperCAmelCase_ : List[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() ) UpperCAmelCase_ : List[Any] = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(A ) != 0: raise ValueError(F"extra keys found: {extra_keys}" ) if len(A ) != 0: raise ValueError(F"missing keys: {missing_keys}" ) model.load_state_dict(A , strict=A ) UpperCAmelCase_ : Any = model.num_parameters(exclude_embeddings=A ) UpperCAmelCase_ : Any = checkpoint['''best_val_loss'''].item() logger.info(F"model loaded: {round(n_params/1e6 , 1 )}M params, {round(A , 3 )} loss" ) model.eval() model.to(A ) del checkpoint, state_dict return model def __UpperCAmelCase ( A : Optional[Any] , A : int=False , A : Tuple="text" ) -> Any: if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() UpperCAmelCase_ : str = '''cpu''' # do conversion on cpu UpperCAmelCase_ : Any = _get_ckpt_path(A , use_small=A ) UpperCAmelCase_ : int = _load_model(A , A , model_type=A , use_small=A ) # load bark initial model UpperCAmelCase_ : List[Any] = _bark_load_model(A , '''cpu''' , model_type=A , use_small=A ) if model_type == "text": UpperCAmelCase_ : int = bark_model['''model'''] if model.num_parameters(exclude_embeddings=A ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model UpperCAmelCase_ : Tuple = 5 UpperCAmelCase_ : Any = 1_0 if model_type in ["text", "coarse"]: UpperCAmelCase_ : int = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int ) UpperCAmelCase_ : Union[str, Any] = bark_model(A )[0] UpperCAmelCase_ : Union[str, Any] = model(A ) # take last logits UpperCAmelCase_ : List[Any] = output_new_model_total.logits[:, [-1], :] else: UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Dict = 8 UpperCAmelCase_ : int = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) UpperCAmelCase_ : int = model(A , A ) UpperCAmelCase_ : Tuple = bark_model(A , A ) UpperCAmelCase_ : Tuple = 
output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1e-3: raise ValueError('''initial and new outputs are not equal''' ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) def __UpperCAmelCase ( A : str , A : Union[str, Any] , A : List[str] , A : Any , A : Dict , A : Union[str, Any] , ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = os.path.join(A , A ) UpperCAmelCase_ : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(A , '''config.json''' ) ) UpperCAmelCase_ : Union[str, Any] = BarkCoarseConfig.from_pretrained(os.path.join(A , '''config.json''' ) ) UpperCAmelCase_ : Tuple = BarkFineConfig.from_pretrained(os.path.join(A , '''config.json''' ) ) UpperCAmelCase_ : List[Any] = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) UpperCAmelCase_ : List[str] = BarkSemanticModel.from_pretrained(A ) UpperCAmelCase_ : Union[str, Any] = BarkCoarseModel.from_pretrained(A ) UpperCAmelCase_ : Union[str, Any] = BarkFineModel.from_pretrained(A ) UpperCAmelCase_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) UpperCAmelCase_ : Union[str, Any] = BarkConfig.from_sub_model_configs( A , A , A , A ) UpperCAmelCase_ : Optional[int] = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) UpperCAmelCase_ : List[str] = BarkModel(A ) UpperCAmelCase_ : Optional[int] = semantic UpperCAmelCase_ : Union[str, Any] = coarseAcoustic UpperCAmelCase_ : Any = fineAcoustic UpperCAmelCase_ : str = codec UpperCAmelCase_ : List[Any] = bark_generation_config Path(A ).mkdir(exist_ok=A ) bark.save_pretrained(A , repo_id=A , push_to_hub=A ) if __name__ == "__main__": _UpperCamelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument('model_type', type=str, help='text, coarse or fine.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.') _UpperCamelCase : Optional[Any] = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
304
'''simple docstring''' import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py _UpperCamelCase : Optional[int] = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. _UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') _UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) _UpperCamelCase : List[str] = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 
'zero-shot-image-classification', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def __UpperCAmelCase ( A : Optional[int] ) -> int: UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A ) return [m.group(0 ) for m in matches] def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCAmelCase_ : Optional[Any] = { config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. UpperCAmelCase_ : Dict = collections.defaultdict(A ) UpperCAmelCase_ : str = collections.defaultdict(A ) UpperCAmelCase_ : int = collections.defaultdict(A ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(A ): UpperCAmelCase_ : int = None if _re_tf_models.match(A ) is not None: UpperCAmelCase_ : Optional[Any] = tf_models UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0] elif _re_flax_models.match(A ) is not None: UpperCAmelCase_ : int = flax_models UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0] elif _re_pt_models.match(A ) is not None: UpperCAmelCase_ : Union[str, Any] = pt_models UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0] if lookup_dict is not None: while len(A ) > 0: if attr_name in model_prefix_to_model_type: UpperCAmelCase_ : Optional[int] = True break # Try again after removing the last word in the name UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] ) UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) UpperCAmelCase_ : List[Any] = list(A ) all_models.sort() UpperCAmelCase_ : Dict = {'''model_type''': all_models} UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models] UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models] UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure UpperCAmelCase_ : int = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: UpperCAmelCase_ : Any = '''AutoProcessor''' elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer''' elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: UpperCAmelCase_ : int = '''AutoFeatureExtractor''' else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
UpperCAmelCase_ : Dict = '''AutoTokenizer''' UpperCAmelCase_ : str = [processors[t] for t in all_models] return pd.DataFrame(A ) def __UpperCAmelCase ( A : Optional[int] ) -> str: UpperCAmelCase_ : int = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"] UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"] # Loop through all three frameworks for module, cls, mapping in zip(A , A , A ): # The type of pipeline may not exist in this framework if not hasattr(A , A ): continue # First extract all model_names UpperCAmelCase_ : List[str] = [] for name in getattr(A , A ).values(): if isinstance(A , A ): model_names.append(A ) else: model_names.extend(list(A ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def __UpperCAmelCase ( A : int , A : Any ) -> Tuple: UpperCAmelCase_ : Tuple = get_frameworks_table() UpperCAmelCase_ : Any = Dataset.from_pandas(A ) UpperCAmelCase_ : str = hf_hub_download( '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A ) UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A ) UpperCAmelCase_ : Optional[int] = { tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class''']) for i in range(len(A ) ) } UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() ) UpperCAmelCase_ : Optional[Any] = pd.DataFrame( { '''model_class''': model_classes, '''pipeline_tag''': [table[m][0] for m in model_classes], '''auto_class''': [table[m][1] for m in model_classes], } ) UpperCAmelCase_ : Dict = Dataset.from_pandas(A ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) ) tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) ) if commit_sha is not None: UpperCAmelCase_ : List[str] = ( F"Update with commit {commit_sha}\n\nSee: " F"https://github.com/huggingface/transformers/commit/{commit_sha}" ) else: UpperCAmelCase_ : int = '''Update''' upload_folder( repo_id='''huggingface/transformers-metadata''' , folder_path=A , repo_type='''dataset''' , token=A , commit_message=A , ) def __UpperCAmelCase ( ) -> int: UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS UpperCAmelCase_ : List[str] = [] for key in pipeline_tasks: if key not in in_table: UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt'''] if isinstance(A , (list, tuple) ): UpperCAmelCase_ : Dict = model[0] UpperCAmelCase_ : Any = model.__name__ if model not in in_table.values(): missing.append(A ) if len(A ) > 0: UpperCAmelCase_ : List[Any] = ''', '''.join(A ) raise ValueError( '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside ''' F"`utils/update_metadata.py`: {msg}. Please add them!" 
) if __name__ == "__main__": _UpperCamelCase : int = argparse.ArgumentParser() parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.') parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.') parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.') _UpperCamelCase : Tuple = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
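# The metadata script relies on splitting CamelCase class names so it can drop the last
# word while searching for a known model prefix. A standalone sketch of that split, using
# the same lookaround regex as camel_case_split above; the example name is illustrative.
import re

_CAMEL = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")


def camel_case_split(identifier: str) -> list:
    return [m.group(0) for m in _CAMEL.finditer(identifier)]


if __name__ == "__main__":
    assert camel_case_split("TFBertModel") == ["TF", "Bert", "Model"]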
304
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) a_ = ( { "feature-extraction": TFMobileBertModel, "fill-mask": TFMobileBertForMaskedLM, "question-answering": TFMobileBertForQuestionAnswering, "text-classification": TFMobileBertForSequenceClassification, "token-classification": TFMobileBertForTokenClassification, "zero-shot": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) a_ = False a_ = False def A ( self : List[str] , _A : Any , _A : Dict , _A : List[str]=False ) -> int: UpperCAmelCase_ : List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A ) if return_labels: if model_class in get_values(_A ): UpperCAmelCase_ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class snake_case__ ( UpperCamelCase): def __init__( self : List[Any] , _A : Optional[Any] , _A : Optional[int]=13 , _A : int=7 , _A : Optional[Any]=True , _A : List[str]=True , _A : Optional[int]=True , _A : Any=True , _A : Tuple=99 , _A : List[Any]=32 , _A : Dict=32 , _A : Union[str, Any]=2 , _A : List[Any]=4 , _A : Optional[int]=37 , _A : Any="gelu" , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : Any=5_12 , _A : Optional[int]=16 , _A : Optional[int]=2 , _A : Tuple=0.02 , _A : Any=3 , _A : int=4 , _A : Optional[Any]=None , ) -> Any: UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : Optional[Any] = use_input_mask UpperCAmelCase_ : List[Any] = use_token_type_ids UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : Union[str, Any] = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Dict = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : Dict = num_choices UpperCAmelCase_ : str = scope UpperCAmelCase_ : Union[str, Any] = embedding_size def A ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.vocab_size ) UpperCAmelCase_ : Optional[int] = None if self.use_input_mask: UpperCAmelCase_ : Any = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : List[str] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Optional[int] = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Any , _A : int , _A : Tuple , _A : Any , _A : Any , _A : str , _A : Optional[int] , _A : int ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = TFMobileBertModel(config=_A ) UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : str = model(_A ) UpperCAmelCase_ : Tuple = [input_ids, input_mask] UpperCAmelCase_ : Any = model(_A ) UpperCAmelCase_ : Any = model(_A ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def A ( self : Any , _A : int , _A : List[str] , _A : str , _A : Tuple , _A : str , _A : List[Any] , _A : Dict ) -> str: UpperCAmelCase_ : str = TFMobileBertForMaskedLM(config=_A ) UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Tuple = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Optional[Any] , _A : Dict , _A : List[str] , _A : str , _A : List[str] , _A : str , _A : Optional[int] , _A : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=_A ) UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def A ( self : Tuple , _A : str , _A : Optional[int] , _A : List[Any] , _A : int , _A : Union[str, Any] , _A : int , _A : Any ) -> List[Any]: UpperCAmelCase_ : Optional[int] = TFMobileBertForPreTraining(config=_A ) UpperCAmelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Optional[int] = model(_A ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def A ( self : Union[str, Any] , _A : int , _A : List[Any] , _A : Dict , _A : int , _A : List[str] , _A : List[Any] , _A : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : Tuple = self.num_labels UpperCAmelCase_ : List[Any] = TFMobileBertForSequenceClassification(config=_A ) UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Dict = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Optional[int] , _A : List[str] , _A : Optional[Any] , _A : List[Any] , _A : List[str] , _A : int , _A : Dict , _A : List[Any] ) -> Tuple: UpperCAmelCase_ : Optional[int] = self.num_choices UpperCAmelCase_ : str = TFMobileBertForMultipleChoice(config=_A ) UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : str = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase_ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : int , _A : Optional[Any] , _A : List[Any] , _A : List[Any] , _A : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Dict = TFMobileBertForTokenClassification(config=_A ) UpperCAmelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : List[Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : Dict , _A : Union[str, Any] , _A : Optional[int] , _A : Any , _A : str ) -> List[Any]: UpperCAmelCase_ : Dict = TFMobileBertForQuestionAnswering(config=_A ) UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Optional[Any] = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict def A ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFMobileBertModelTest.TFMobileBertModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self , config_class=_A , hidden_size=37 ) def A ( self : Optional[int] ) -> Tuple: self.config_tester.run_common_tests() def A ( self : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_A ) def A ( self : Tuple ) -> List[Any]: 
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_A ) def A ( self : int ) -> Dict: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_A ) def A ( self : int ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_A ) def A ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_A ) def A ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_A ) def A ( self : Any ) -> Any: UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_A ) def A ( self : Dict ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_A ) @slow def A ( self : Dict ) -> List[str]: # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["google/mobilebert-uncased"]: UpperCAmelCase_ : Dict = TFMobileBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_tf class snake_case__ ( unittest.TestCase): @slow def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : Dict = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' ) UpperCAmelCase_ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : Union[str, Any] = model(_A )[0] UpperCAmelCase_ : Optional[int] = [1, 6, 3_05_22] self.assertEqual(output.shape , _A ) UpperCAmelCase_ : Any = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1e-4 )
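# The integration test above validates only a small logits slice against hard-coded
# values within an absolute tolerance. A dependency-light sketch of that
# slice-and-compare pattern with NumPy; the zero tensors below are placeholders, not
# real MobileBERT output.
import numpy as np


def check_slice(output: "np.ndarray", expected: "np.ndarray", atol: float = 1e-4) -> bool:
    return bool(np.allclose(output[:, :3, :3], expected, atol=atol))


if __name__ == "__main__":
    out = np.zeros((1, 6, 30522), dtype=np.float32)
    assert check_slice(out, np.zeros((1, 3, 3), dtype=np.float32))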
304
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) _UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case__ : a_ = field( default=UpperCamelCase , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"}) a_ = field( default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) a_ = field( default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class snake_case__ : a_ = field( default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."}) a_ = field( default=UpperCamelCase , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) a_ = field( default=UpperCamelCase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."}) a_ = field(default=UpperCamelCase , metadata={"help": "Whether ot not to use whole word mask."}) a_ = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}) a_ = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) a_ = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}) a_ = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } , ) a_ = field( default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"}) def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]: def _dataset(A : Dict , A : str=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=A , file_path=A , block_size=args.block_size , ref_path=A , ) return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size ) else: return TextDataset( tokenizer=A , file_path=A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=A , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def __UpperCAmelCase ( ) -> Optional[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , A ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
if model_args.config_name: UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another''' ''' script, save it,and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A ) model.resize_token_embeddings(len(A ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the''' '''--mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: UpperCAmelCase_ : List[str] = tokenizer.max_len # Our input block size will be the max possible for the model else: UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len ) # Get datasets UpperCAmelCase_ : str = ( get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) UpperCAmelCase_ : Any = ( get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling( tokenizer=A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask( tokenizer=A , mlm_probability=data_args.mlm_probability ) else: UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling( tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer UpperCAmelCase_ : Any = Trainer( model=A , args=A , data_collator=A , train_dataset=A , eval_dataset=A , prediction_loss_only=A , ) # Training if training_args.do_train: UpperCAmelCase_ : List[str] = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=A ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase_ : Tuple = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase_ : Dict = trainer.evaluate() UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] ) UpperCAmelCase_ : Optional[int] = 
{'''perplexity''': perplexity} UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(A , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , A , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(A ) return results def __UpperCAmelCase ( A : Tuple ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
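# The evaluation block above converts the mean eval cross-entropy into perplexity with
# exp(loss); that one-liner in isolation (the sample loss value is made up):
import math


def perplexity(eval_loss: float) -> float:
    return math.exp(eval_loss)


if __name__ == "__main__":
    assert round(perplexity(2.0), 4) == 7.3891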
304
1
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent _UpperCamelCase : List[Any] = {'UserAgent': UserAgent().random} def __UpperCAmelCase ( A : str ) -> dict: UpperCAmelCase_ : Tuple = script.contents[0] UpperCAmelCase_ : Any = json.loads(data[data.find('''{"config"''' ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class snake_case__ : def __init__( self : int , _A : Tuple ) -> List[Any]: UpperCAmelCase_ : Dict = F"https://www.instagram.com/{username}/" UpperCAmelCase_ : Optional[int] = self.get_json() def A ( self : Optional[int] ) -> dict: UpperCAmelCase_ : List[Any] = requests.get(self.url , headers=_A ).text UpperCAmelCase_ : Tuple = BeautifulSoup(_A , '''html.parser''' ).find_all('''script''' ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ) -> str: return F"{self.__class__.__name__}('{self.username}')" def __str__( self : Optional[int] ) -> str: return F"{self.fullname} ({self.username}) is {self.biography}" @property def A ( self : List[str] ) -> str: return self.user_data["username"] @property def A ( self : Tuple ) -> str: return self.user_data["full_name"] @property def A ( self : List[Any] ) -> str: return self.user_data["biography"] @property def A ( self : str ) -> str: return self.user_data["business_email"] @property def A ( self : Optional[int] ) -> str: return self.user_data["external_url"] @property def A ( self : int ) -> int: return self.user_data["edge_followed_by"]["count"] @property def A ( self : Union[str, Any] ) -> int: return self.user_data["edge_follow"]["count"] @property def A ( self : List[str] ) -> int: return self.user_data["edge_owner_to_timeline_media"]["count"] @property def A ( self : str ) -> str: return self.user_data["profile_pic_url_hd"] @property def A ( self : Any ) -> bool: return self.user_data["is_verified"] @property def A ( self : Tuple ) -> bool: return self.user_data["is_private"] def __UpperCAmelCase ( A : str = "github" ) -> None: import os if os.environ.get('''CI''' ): return # test failing on GitHub Actions UpperCAmelCase_ : List[str] = InstagramUser(A ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , A ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith('''https://instagram.''' ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase : List[str] = InstagramUser('github') print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
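# extract_user_profile above slices the shared-data JSON out of an inline <script>
# payload by searching for the '{"config"' marker and dropping the trailing character.
# The same idea on a fabricated payload (the real page structure may differ):
import json


def extract_json(script_text: str) -> dict:
    return json.loads(script_text[script_text.find('{"config"') : -1])


if __name__ == "__main__":
    payload = 'window._sharedData = {"config": {"csrf_token": "x"}};'
    assert extract_json(payload)["config"]["csrf_token"] == "x"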
304
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class snake_case__ ( unittest.TestCase): @classmethod def A ( cls : Optional[int] ) -> Tuple: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(_A ) @classmethod def A ( cls : int ) -> Tuple: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def A ( self : Dict ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : List[str] = FlaxBertModel(_A ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Tuple = 
flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params ) UpperCAmelCase_ : str = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: UpperCAmelCase_ : int = False return models_are_equal @require_flax class snake_case__ ( unittest.TestCase): def A ( self : Any ) -> Any: UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Any = FlaxBertModel(_A ) UpperCAmelCase_ : Tuple = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) ) with self.assertRaises(_A ): UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Tuple: UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Tuple = FlaxBertModel(_A ) UpperCAmelCase_ : str = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' ) with self.assertRaises(_A ): UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Optional[int]: UpperCAmelCase_ : int = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A )
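# check_models_equal above flattens two parameter trees and compares the summed absolute
# difference of each leaf against a tolerance. A sketch of the same check on plain dicts
# of NumPy arrays standing in for the Flax param trees:
import numpy as np


def params_close(a: dict, b: dict, tol: float = 1e-4) -> bool:
    return all(np.sum(np.abs(a[k] - b[k])) <= tol for k in a)


if __name__ == "__main__":
    p = {"dense/kernel": np.ones((2, 2))}
    assert params_close(p, {"dense/kernel": np.ones((2, 2))})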
304
1
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
    if isinstance(A , A ):
        UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
    else:
        UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )

    for i, tensor in enumerate(A ):
        if padding_side == "right":
            if isinstance(A , A ):
                UpperCAmelCase_ : Tuple = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : Dict = tensor[:sequence_length]
        else:
            if isinstance(A , A ):
                UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : int = tensor[:sequence_length]

    return out_tensor.tolist()


def __UpperCAmelCase ( A : List[Any] ) -> str:
    UpperCAmelCase_ : Dict = ord(A )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class snake_case__ ( UpperCamelCase):
    a_ = 42
    a_ = True
    a_ = None
    a_ = None
    a_ = -100
    a_ = "pt"

    def A ( self : List[Any] , _A : Dict ) -> Tuple:
        import torch

        UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        UpperCAmelCase_ : Tuple = self.tokenizer.pad(
            _A ,
            padding=self.padding ,
            max_length=self.max_length ,
            pad_to_multiple_of=self.pad_to_multiple_of ,
            return_tensors='''pt''' if labels is None else None ,
        )

        if labels is None:
            return batch

        UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
        UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
        if padding_side == "right":
            UpperCAmelCase_ : Optional[Any] = [
                list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
            ]
        else:
            UpperCAmelCase_ : Any = [
                [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
            ]

        UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
        UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
        UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
        UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
        UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}

        return batch
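# padding_tensor above right- or left-pads ragged per-example tensors into a fixed
# (batch, sequence_length) array filled with a pad value. A minimal NumPy sketch of the
# right-padding branch; names and the demo data are illustrative.
import numpy as np


def pad_right(sequences: list, sequence_length: int, pad_value: int) -> list:
    out = np.full((len(sequences), sequence_length), pad_value)
    for i, seq in enumerate(sequences):
        out[i, : len(seq)] = seq[:sequence_length]
    return out.tolist()


if __name__ == "__main__":
    assert pad_right([[1, 2], [3]], 3, -1) == [[1, 2, -1], [3, -1, -1]]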
304
'''simple docstring'''

_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
304
1
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


_UpperCamelCase : Union[str, Any] = {
    'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
    'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
    'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
    'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
    'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
    'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
    'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
    'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}


class snake_case__ ( UpperCamelCase):
    a_ = "albert"

    def __init__( self : Tuple , _A : Optional[Any]=3_00_00 , _A : Any=1_28 , _A : Tuple=40_96 , _A : Tuple=12 , _A : Union[str, Any]=1 , _A : int=64 , _A : Optional[int]=1_63_84 , _A : int=1 , _A : Optional[Any]="gelu_new" , _A : List[Any]=0 , _A : int=0 , _A : int=5_12 , _A : int=2 , _A : Tuple=0.02 , _A : Tuple=1e-12 , _A : Optional[Any]=0.1 , _A : Optional[int]="absolute" , _A : Any=0 , _A : List[Any]=2 , _A : int=3 , **_A : Any , ) -> str:
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )

        UpperCAmelCase_ : Optional[Any] = vocab_size
        UpperCAmelCase_ : List[Any] = embedding_size
        UpperCAmelCase_ : Optional[int] = hidden_size
        UpperCAmelCase_ : Any = num_hidden_layers
        UpperCAmelCase_ : Tuple = num_hidden_groups
        UpperCAmelCase_ : Optional[Any] = num_attention_heads
        UpperCAmelCase_ : Dict = inner_group_num
        UpperCAmelCase_ : List[Any] = hidden_act
        UpperCAmelCase_ : Union[str, Any] = intermediate_size
        UpperCAmelCase_ : Any = hidden_dropout_prob
        UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : Any = max_position_embeddings
        UpperCAmelCase_ : List[str] = type_vocab_size
        UpperCAmelCase_ : Optional[int] = initializer_range
        UpperCAmelCase_ : List[str] = layer_norm_eps
        UpperCAmelCase_ : Tuple = classifier_dropout_prob
        UpperCAmelCase_ : Optional[Any] = position_embedding_type


class snake_case__ ( UpperCamelCase):
    @property
    def A ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            UpperCAmelCase_ : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            UpperCAmelCase_ : List[str] = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
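# The ONNX config above switches its dynamic axes on the task: (batch, choice, sequence)
# for multiple choice, (batch, sequence) otherwise. That selection logic in isolation,
# with the task string passed in explicitly rather than read from self:
from collections import OrderedDict


def dynamic_axes(task: str) -> OrderedDict:
    axis = {0: "batch", 1: "choice", 2: "sequence"} if task == "multiple-choice" else {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis), ("token_type_ids", axis)])


if __name__ == "__main__":
    assert dynamic_axes("default")["input_ids"] == {0: "batch", 1: "sequence"}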
304
304
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _UpperCamelCase : List[Any] = logging.get_logger(__name__) _UpperCamelCase : Dict = {'vocab_file': 'sentencepiece.model'} _UpperCamelCase : List[str] = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } _UpperCamelCase : Dict = { 'google/rembert': 256, } class snake_case__ ( UpperCamelCase): a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : int , _A : Union[str, Any] , _A : Any=False , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]="[CLS]" , _A : List[str]="[SEP]" , _A : List[Any]="[UNK]" , _A : str="[SEP]" , _A : Union[str, Any]="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : List[Any]="[MASK]" , **_A : Dict , ) -> Dict: super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , ) UpperCAmelCase_ : int = do_lower_case UpperCAmelCase_ : Any = remove_space UpperCAmelCase_ : int = keep_accents UpperCAmelCase_ : Optional[Any] = vocab_file UpperCAmelCase_ : List[str] = spm.SentencePieceProcessor() self.sp_model.Load(_A ) @property def A ( self : Any ) -> Optional[Any]: return len(self.sp_model ) def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ) -> int: UpperCAmelCase_ : str = self.__dict__.copy() UpperCAmelCase_ : Dict = None return state def __setstate__( self : Optional[int] , _A : Any ) -> Optional[int]: UpperCAmelCase_ : Any = d UpperCAmelCase_ : Union[str, Any] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def A ( self : str , _A : Tuple , _A : List[str]=False ) -> Tuple: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(_A ) return pieces def A ( self : Optional[int] , _A : str ) -> List[str]: return self.sp_model.PieceToId(_A ) def A ( self : str , _A : List[str] ) -> Optional[Any]: return self.sp_model.IdToPiece(_A ) def A ( self : List[str] , _A : str ) -> Union[str, Any]: UpperCAmelCase_ : str = self.sp_model.decode_pieces(_A ) return out_string def A ( self : str , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Dict = [self.sep_token_id] UpperCAmelCase_ : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def A ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1] def A ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Any = [self.sep_token_id] UpperCAmelCase_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A ( self : Optional[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(_A ): logger.error('''Vocabulary path ({}) should be a directory'''.format(_A ) ) return UpperCAmelCase_ : Dict = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) return (out_vocab_file,)
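# The tokenizer above builds BERT-style segment ids: zeros over "[CLS] A [SEP]" and ones
# over "B [SEP]". The same arithmetic on plain id lists (a sketch; the real method also
# handles the single-sequence case shown above):
def token_type_ids(ids_a: list, ids_b: list = None) -> list:
    if ids_b is None:
        return [0] * (len(ids_a) + 2)  # [CLS] + A + [SEP]
    return [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)  # ... + B + [SEP]


if __name__ == "__main__":
    assert token_type_ids([5, 6], [7]) == [0, 0, 0, 0, 1, 1]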
304
'''simple docstring'''
import functools


def __UpperCAmelCase ( A : str , A : str ) -> int:
    UpperCAmelCase_ : Optional[Any] = len(A )
    UpperCAmelCase_ : List[str] = len(A )

    @functools.cache
    def min_distance(A : int , A : int ) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_worda - indexa
        # if second word index is overflow - delete all from the first word
        if indexa >= len_worda:
            return len_worda - indexa
        UpperCAmelCase_ : Any = int(worda[indexa] != worda[indexa] )  # current letters not identical
        return min(
            1 + min_distance(indexa + 1 , A ) ,
            1 + min_distance(A , indexa + 1 ) ,
            diff + min_distance(indexa + 1 , indexa + 1 ) ,
        )

    return min_distance(0 , 0 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
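# The function above is a memoised Levenshtein distance: functools.cache on the
# index-pair recursion. The same recurrence with readable names, for reference:
import functools


def levenshtein(a: str, b: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i >= len(a):  # a exhausted: insert the rest of b
            return len(b) - j
        if j >= len(b):  # b exhausted: delete the rest of a
            return len(a) - i
        diff = int(a[i] != b[j])
        return min(1 + go(i + 1, j), 1 + go(i, j + 1), diff + go(i + 1, j + 1))

    return go(0, 0)


if __name__ == "__main__":
    assert levenshtein("kitten", "sitting") == 3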
304
1
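A quick sanity-check sketch for the edit-distance snippet above, using standard Levenshtein values (the placeholder function name follows the dump's convention):

# Sanity checks for the memoized edit distance defined above.
assert __UpperCAmelCase("kitten", "sitting") == 3  # substitute k->s, e->i, insert g
assert __UpperCAmelCase("intention", "execution") == 5
assert __UpperCAmelCase("", "abc") == 3  # pure insertions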
'''simple docstring'''


def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; shorter words are kept as-is."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
304
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """Project Euler 57: count how many of the first ``n`` expansions of the
    continued fraction for sqrt(2) have a numerator with more digits than the
    denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f'{solution() = }')
304
1
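A short worked check for the Euler 57 snippet above: starting from 1/1, the recurrence numerator' = numerator + 2*denominator, denominator' = numerator + denominator yields 3/2, 7/5, 17/12, 41/29, ..., and the eighth expansion 1393/985 is the first whose numerator has more digits than its denominator. Over the first 1000 expansions there are 153 such cases.

# Reproduce the first expansions of the continued fraction for sqrt(2).
num, den = 1, 1
expansions = []
for _ in range(8):
    num, den = num + 2 * den, num + den
    expansions.append((num, den))
assert expansions[0] == (3, 2) and expansions[-1] == (1393, 985)
assert solution(1000) == 153  # the known Project Euler 57 answer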
'''simple docstring''' import collections import os import re from pathlib import Path _UpperCamelCase : str = 'src/transformers' # Matches is_xxx_available() _UpperCamelCase : List[str] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} _UpperCamelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] _UpperCamelCase : Any = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available _UpperCamelCase : List[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") _UpperCamelCase : Union[str, Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] _UpperCamelCase : Optional[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", _UpperCamelCase : List[str] = re.compile(R'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], _UpperCamelCase : Union[str, Any] = re.compile(R'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo _UpperCamelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: _UpperCamelCase : Any = re.compile(R'^\s*try:') # Catches a line with else: _UpperCamelCase : Optional[Any] = re.compile(R'^\s*else:') def __UpperCAmelCase ( A : str ) -> str: if _re_test_backend.search(A ) is None: return None UpperCAmelCase_ : Tuple = [b[0] for b in _re_backend.findall(A )] backends.sort() return "_and_".join(A ) def __UpperCAmelCase ( A : Optional[int] ) -> Tuple: with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: UpperCAmelCase_ : List[str] = f.readlines() UpperCAmelCase_ : Dict = 0 while line_index < len(A ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A ): return None # First grab the objects without a specific backend in _import_structure UpperCAmelCase_ : Dict = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: UpperCAmelCase_ : Union[str, Any] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A ): UpperCAmelCase_ : Dict = _re_one_line_import_struct.search(A ).groups()[0] UpperCAmelCase_ : List[Any] = re.findall(r'''\[([^\]]+)\]''' , A ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue UpperCAmelCase_ : Optional[int] = _re_import_struct_key_value.search(A ) if single_line_import_search is not None: UpperCAmelCase_ : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A ) > 0] objects.extend(A ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 UpperCAmelCase_ : Dict = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
UpperCAmelCase_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ : List[Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): UpperCAmelCase_ : Tuple = lines[line_index] if _re_import_struct_add_one.search(A ) is not None: objects.append(_re_import_struct_add_one.search(A ).groups()[0] ) elif _re_import_struct_add_many.search(A ) is not None: UpperCAmelCase_ : Optional[int] = _re_import_struct_add_many.search(A ).groups()[0].split(''', ''' ) UpperCAmelCase_ : Optional[int] = [obj[1:-1] for obj in imports if len(A ) > 0] objects.extend(A ) elif _re_between_brackets.search(A ) is not None: UpperCAmelCase_ : List[str] = _re_between_brackets.search(A ).groups()[0].split(''', ''' ) UpperCAmelCase_ : Any = [obj[1:-1] for obj in imports if len(A ) > 0] objects.extend(A ) elif _re_quote_object.search(A ) is not None: objects.append(_re_quote_object.search(A ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 1_2 + '''"''' ): objects.append(line[1_3:-3] ) line_index += 1 UpperCAmelCase_ : Dict = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend UpperCAmelCase_ : int = [] while ( line_index < len(A ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): UpperCAmelCase_ : Dict = lines[line_index] UpperCAmelCase_ : List[str] = _re_import.search(A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 UpperCAmelCase_ : Optional[int] = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(A ): # If the line is an if is_backend_available, we grab all objects associated. 
UpperCAmelCase_ : List[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: UpperCAmelCase_ : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 UpperCAmelCase_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): UpperCAmelCase_ : Dict = lines[line_index] UpperCAmelCase_ : str = _re_import.search(A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 UpperCAmelCase_ : Any = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __UpperCAmelCase ( A : int , A : str ) -> Tuple: def find_duplicates(A : Optional[int] ): return [k for k, v in collections.Counter(A ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] UpperCAmelCase_ : Optional[Any] = [] for key in import_dict_objects.keys(): UpperCAmelCase_ : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"Duplicate _import_structure definitions for: {duplicate_imports}" ) UpperCAmelCase_ : Any = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): UpperCAmelCase_ : Optional[int] = '''base imports''' if key == '''none''' else F"{key} backend" errors.append(F"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F" {a} in _import_structure but not in TYPE_HINT." 
) return errors def __UpperCAmelCase ( ) -> Tuple: UpperCAmelCase_ : Dict = [] for root, _, files in os.walk(A ): if "__init__.py" in files: UpperCAmelCase_ : Dict = os.path.join(A , '''__init__.py''' ) UpperCAmelCase_ : List[str] = parse_init(A ) if objects is not None: UpperCAmelCase_ : Dict = analyze_results(*A ) if len(A ) > 0: UpperCAmelCase_ : Union[str, Any] = F"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append('''\n'''.join(A ) ) if len(A ) > 0: raise ValueError('''\n\n'''.join(A ) ) def __UpperCAmelCase ( ) -> Optional[int]: UpperCAmelCase_ : str = [] for path, directories, files in os.walk(A ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(A ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A ) / folder).glob('''*.py''' ) ) ) == 0: continue UpperCAmelCase_ : Optional[Any] = str((Path(A ) / folder).relative_to(A ) ) UpperCAmelCase_ : List[Any] = short_path.replace(os.path.sep , '''.''' ) submodules.append(A ) for fname in files: if fname == "__init__.py": continue UpperCAmelCase_ : Any = str((Path(A ) / fname).relative_to(A ) ) UpperCAmelCase_ : Dict = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(A ) return submodules _UpperCamelCase : Any = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def __UpperCAmelCase ( ) -> Union[str, Any]: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import UpperCAmelCase_ : Dict = direct_transformers_import(A ) UpperCAmelCase_ : Any = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(A , '''__init__.py''' ) , '''r''' ) as f: UpperCAmelCase_ : Optional[int] = f.read() import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , A ) ) ) UpperCAmelCase_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(A ) > 0: UpperCAmelCase_ : Dict = '''\n'''.join(F"- {module}" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' F"{list_of_modules}\n" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
304
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : Optional[Any] = do_center_crop UpperCAmelCase_ : Optional[int] = crop_size UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Optional[Any] = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : List[Any] = do_reduce_labels def A ( self : Union[str, Any] ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCAmelCase ( ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] ) UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCAmelCase ( ) -> Any: UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] ) UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] ) UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] ) UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = BeitImageProcessor if is_vision_available() else None def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> Optional[int]: 
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _A ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _A ) def A ( self : Optional[Any] ) -> Any: pass def A ( self : List[str] ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) UpperCAmelCase_ : Union[str, Any] = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs() UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def A ( self : List[Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
304
1
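To illustrate the `find_backend` helper in the init-checking script above (assuming its obfuscated definition name is restored to `find_backend`, as the call sites inside `parse_init` expect): it returns the backend name for a guarded `if not is_xxx_available():` line and `None` for everything else.

# Behaviour implied by the regexes defined at the top of the script.
assert find_backend("    if not is_torch_available():") == "torch"
assert find_backend("_import_structure = {}") is None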
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
304
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class snake_case__ ( enum.Enum): a_ = 0 a_ = 1 a_ = 2 @add_end_docstrings(UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]: super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. UpperCAmelCase_ : Dict = None if self.model.config.prefix is not None: UpperCAmelCase_ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params ) UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params} UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params} def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = {} if prefix is not None: UpperCAmelCase_ : List[Any] = prefix if prefix: UpperCAmelCase_ : Tuple = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) UpperCAmelCase_ : Union[str, Any] = handle_long_generation preprocess_params.update(_A ) UpperCAmelCase_ : Optional[int] = generate_kwargs UpperCAmelCase_ : Tuple = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : List[Any] = ReturnType.TENSORS if return_type is not None: UpperCAmelCase_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase_ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict: return super().__call__(_A , **_A ) def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : str = prompt_text if handle_long_generation == "hole": UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens'''] else: UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:] return inputs def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]: UpperCAmelCase_ : Any = model_inputs['''input_ids'''] UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = 1 else: UpperCAmelCase_ : Optional[int] = input_ids.shape[0] UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) UpperCAmelCase_ : Any = generated_sequence.shape[0] if self.framework == "pt": UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0] UpperCAmelCase_ : int = model_outputs['''input_ids'''] UpperCAmelCase_ : str = model_outputs['''prompt_text'''] UpperCAmelCase_ : Any = generated_sequence.numpy().tolist() UpperCAmelCase_ : int = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text UpperCAmelCase_ : Any = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCAmelCase_ : List[str] = 0 else: UpperCAmelCase_ : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:] else: UpperCAmelCase_ : Dict = text[prompt_length:] UpperCAmelCase_ : List[str] = {'''generated_text''': all_text} records.append(_A ) return records
304
1
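A minimal usage sketch for the text-generation pipeline class above; the checkpoint name is only an example, not something the snippet itself prescribes.

from transformers import pipeline

# The class above backs the standard "text-generation" pipeline task.
generator = pipeline('text-generation', model='gpt2')  # example checkpoint
outputs = generator('Once upon a time', max_new_tokens=20)
print(outputs[0]['generated_text'])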
'''simple docstring'''

from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
304
'''simple docstring'''

from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Evaluate a perfect binary game tree: the maximizer and minimizer
    alternate levels until the leaf scores are reached."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
304
1
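A worked check for the minimax snippet above: at depth 2 the maximizer keeps max(90, 23) = 90, max(6, 33) = 33, max(21, 65) = 65 and max(123, 34423) = 34423; at depth 1 the minimizer keeps min(90, 33) = 33 and min(65, 34423) = 65; the maximizing root then picks max(33, 65) = 65.

import math

# The sample tree from main() evaluates to 65.
scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax(0, 0, True, scores, math.log(len(scores), 2)) == 65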
'''simple docstring'''


def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(next_iteration)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    if len(equations) == 0:
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError('solve_simultaneous() requires n lists of length n+1')
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError('solve_simultaneous() requires lists of integers')
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError('solve_simultaneous() requires at least 1 full equation')
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
304
'''simple docstring'''

from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high+1]
    back into input_list in sorted order."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    print(iter_merge_sort(unsorted))
304
1
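Illustrative checks for the two snippets above. For the bundled 5x5 system, summing all five equations gives 6(x1 + ... + x5) = 30, so the variables sum to 5 and each equation reduces to x_i = i - 2; the solver should therefore print [-1.0, 0.0, 1.0, 2.0, 3.0].

assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
assert solve_simultaneous([[4, 2]]) == [0.5]  # single equation: 4x = 2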
'''simple docstring''' _UpperCamelCase : Tuple = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] _UpperCamelCase : Tuple = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] _UpperCamelCase : Dict = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] _UpperCamelCase : int = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] _UpperCamelCase : List[str] = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] _UpperCamelCase : Optional[int] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] _UpperCamelCase : List[Any] = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] _UpperCamelCase : int = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
304
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case__ : a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 a_ = 42 a_ = 42 a_ = 42 a_ = 42 def A ( self : Tuple ) -> Optional[int]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A ( self : List[Any] ) -> Union[str, Any]: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A ( self : Optional[int] ) -> torch.Tensor: UpperCAmelCase_ : Dict = torch.arange(self.height * self.width ) UpperCAmelCase_ : int = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) ) UpperCAmelCase_ : Any = self.get_image_coords() UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A ) UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor: UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 ) UpperCAmelCase_ : Union[str, Any] = self.resolution() UpperCAmelCase_ : int = self.fov() UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 ) UpperCAmelCase_ : List[Any] = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A ) UpperCAmelCase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera: UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : Optional[int] = -z * 4 UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(A , A ) origins.append(A ) xs.append(A ) ys.append(A ) zs.append(A ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
304
1
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case__ ( UpperCamelCase): a_ = ["image_processor", "tokenizer"] a_ = "ChineseCLIPImageProcessor" a_ = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Any , _A : Optional[int]=None , _A : Tuple=None , **_A : Dict ) -> List[Any]: UpperCAmelCase_ : Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _A , ) UpperCAmelCase_ : Any = kwargs.pop('''feature_extractor''' ) UpperCAmelCase_ : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(_A , _A ) UpperCAmelCase_ : Optional[int] = self.image_processor def __call__( self : Optional[int] , _A : Union[str, Any]=None , _A : List[Any]=None , _A : str=None , **_A : Union[str, Any] ) -> int: if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: UpperCAmelCase_ : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A ) if images is not None: UpperCAmelCase_ : Any = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None and images is not None: UpperCAmelCase_ : Optional[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A ) , tensor_type=_A ) def A ( self : Union[str, Any] , *_A : int , **_A : Any ) -> List[str]: return self.tokenizer.batch_decode(*_A , **_A ) def A ( self : Optional[Any] , *_A : Tuple , **_A : Union[str, Any] ) -> Tuple: return self.tokenizer.decode(*_A , **_A ) @property def A ( self : Optional[int] ) -> int: UpperCAmelCase_ : str = self.tokenizer.model_input_names UpperCAmelCase_ : Optional[Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def A ( self : Dict ) -> str: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _A , ) return self.image_processor_class
304
'''simple docstring'''

import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt each character with a fresh random key: c = (i + k) * k."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Invert the encryption: i = (c - k**2) / k."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
304
1
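A round-trip property check for the one-time-pad snippet above: since c = (i + k) * k = i*k + k**2, decryption recovers i = (c - k**2) / k exactly, so decrypt inverts encrypt for any text.

cipher, key = Onepad.encrypt('attack at dawn')
assert Onepad.decrypt(cipher, key) == 'attack at dawn'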
'''simple docstring'''

import logging

from transformers.configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
304
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : Union[str, Any] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = ReformerTokenizer a_ = ReformerTokenizerFast a_ = True a_ = False a_ = True def A ( self : Optional[Any] ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = ReformerTokenizer(_A , keep_accents=_A ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[Any] = '''<s>''' UpperCAmelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''j''' ) self.assertEqual(len(_A ) , 10_00 ) def A ( self : Optional[int] ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 10_00 ) def A ( self : Optional[Any] ) -> List[Any]: if not self.test_rust_tokenizer: return UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Any = '''I was born in 92000, and this is falsé.''' UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize(_A ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) UpperCAmelCase_ : int = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) UpperCAmelCase_ : Tuple = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(_A ) UpperCAmelCase_ : List[str] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def A ( self : Tuple , _A : Dict=15 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A , **_A ) # Simple input UpperCAmelCase_ : Optional[int] = '''This is a simple input''' UpperCAmelCase_ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2'''] UpperCAmelCase_ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''') UpperCAmelCase_ : Dict = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Simple input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , ) # Pair input self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''' ) # Pair input self.assertRaises( _A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , 
) def A ( self : Union[str, Any] ) -> int: pass def A ( self : int ) -> Any: UpperCAmelCase_ : Any = ReformerTokenizer(_A , keep_accents=_A ) UpperCAmelCase_ : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(_A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_A ) , [2_85, 46, 10, 1_70, 3_82] , ) UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_tokens_to_ids(_A ) self.assertListEqual( _A , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ : List[str] = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual( _A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def A ( self : List[str] ) -> Optional[int]: return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : Tuple = '''Hello World!''' UpperCAmelCase_ : int = [1_26, 32, 2_62, 1_52, 38, 72, 2_87] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @slow def A ( self : List[Any] ) -> str: UpperCAmelCase_ : Tuple = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) UpperCAmelCase_ : int = [ 1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 35, 28, 2_75, 3, 2_59, 2_97, 2_60, 84, 4, 35, 1_10, 44, 8, 2_59, 91, 2_68, 21, 11, 2_09, 2_74, 1_09, 2_66, 2_77, 1_17, 86, 93, 3_15, 2_58, 2_78, 2_58, 2_77, 2_58, 0, 2_58, 2_88, 2_58, 3_19, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 0, 2_58, 2_87, 2_58, 3_15, 2_58, 2_89, 2_58, 2_78, 99, 2_69, 2_66, 2_62, 8, 2_59, 2_41, 4, 2_17, 2_30, 2_68, 2_66, 55, 1_68, 1_06, 75, 1_93, 2_66, 2_23, 27, 49, 26, 2_82, 25, 2_64, 2_99, 19, 26, 0, 2_58, 2_77, 1_17, 86, 93, 1_76, 1_83, 2_70, 11, 2_62, 42, 61, 2_65, ] self.assertListEqual(_A , self.big_tokenizer.encode(_A ) ) @require_torch @slow def A ( self : List[str] ) -> Optional[int]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence UpperCAmelCase_ : int = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ : List[Any] = ''' '''.join(_A ) UpperCAmelCase_ : str = self.big_tokenizer.encode_plus(_A , return_tensors='''pt''' ) UpperCAmelCase_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='''pt''' ) UpperCAmelCase_ : List[Any] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) UpperCAmelCase_ : Any = encoded_sequence['''input_ids'''].shape UpperCAmelCase_ : Optional[int] = ReformerModel(_A ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**_A ) model(**_A ) @slow def A ( self : int ) -> Optional[Any]: # fmt: off UpperCAmelCase_ : int = {'''input_ids''': [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 UpperCAmelCase_ : Optional[Any] = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=_A , model_name='''google/reformer-crime-and-punishment''' , revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''' , padding=_A , sequences=_A , )
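# Hedged usage sketch for the slow Reformer integration test above: the test
# pins the exact ids for "Hello World!", so a minimal round trip (assuming
# `transformers` with sentencepiece installed and Hub access) looks like this:
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
assert tok.encode("Hello World!") == [126, 32, 262, 152, 38, 72, 287]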
'''simple docstring''' import colorsys from PIL import Image # type: ignore def __UpperCAmelCase ( A : float , A : float , A : int ) -> float: UpperCAmelCase_ : Dict = x UpperCAmelCase_ : List[str] = y for step in range(A ): # noqa: B007 UpperCAmelCase_ : Dict = a * a - b * b + x UpperCAmelCase_ : Tuple = 2 * a * b + y UpperCAmelCase_ : Dict = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def __UpperCAmelCase ( A : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return (2_5_5, 2_5_5, 2_5_5) def __UpperCAmelCase ( A : float ) -> tuple: if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(A , 1 , 1 ) ) def __UpperCAmelCase ( A : int = 8_0_0 , A : int = 6_0_0 , A : float = -0.6 , A : float = 0 , A : float = 3.2 , A : int = 5_0 , A : bool = True , ) -> Image.Image: UpperCAmelCase_ : Union[str, Any] = Image.new('''RGB''' , (image_width, image_height) ) UpperCAmelCase_ : Any = img.load() # loop through the image-coordinates for image_x in range(A ): for image_y in range(A ): # determine the figure-coordinates based on the image-coordinates UpperCAmelCase_ : Tuple = figure_width / image_width * image_height UpperCAmelCase_ : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width UpperCAmelCase_ : int = figure_center_y + (image_y / image_height - 0.5) * figure_height UpperCAmelCase_ : Optional[Any] = get_distance(A , A , A ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: UpperCAmelCase_ : Dict = get_color_coded_rgb(A ) else: UpperCAmelCase_ : Tuple = get_black_and_white_rgb(A ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure _UpperCamelCase : List[Any] = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
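# De-obfuscated sketch of the escape-time iteration in the file above; the
# renamed helper restores the `get_distance` name its call sites use:
def get_distance(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)

print(get_distance(0.0, 0.0, 50))  # 1.0 -> the origin never escapes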
'''simple docstring''' from __future__ import annotations def __UpperCAmelCase ( A : str ) -> list[int]: return [ord(A ) - 9_6 for elem in plain] def __UpperCAmelCase ( A : list[int] ) -> str: return "".join(chr(elem + 9_6 ) for elem in encoded ) def __UpperCAmelCase ( ) -> None: UpperCAmelCase_ : Tuple = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''' , A ) print('''Decoded:''' , decode(A ) ) if __name__ == "__main__": main()
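# Standalone sketch of the A1Z26 mapping above, under the names the obfuscated
# `main` actually calls (`encode` / `decode`); letters map to positions 1-26:
def encode(plain: str) -> list[int]:
    return [ord(ch) - 96 for ch in plain]

def decode(encoded: list[int]) -> str:
    return "".join(chr(n + 96) for n in encoded)

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode(encode("hello")) == "hello"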
'''simple docstring''' import warnings warnings.warn( 'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: ' '`from accelerate import find_executable_batch_size` to avoid this warning.', FutureWarning, )
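# The supported import that the deprecation shim above points to (assuming
# `accelerate` is installed):
from accelerate import find_executable_batch_size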
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
'''simple docstring''' from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _UpperCamelCase : str = logging.get_logger(__name__) # General docstring _UpperCamelCase : int = 'MobileNetV1Config' # Base docstring _UpperCamelCase : str = 'google/mobilenet_v1_1.0_224' _UpperCamelCase : str = [1, 1_024, 7, 7] # Image classification docstring _UpperCamelCase : str = 'google/mobilenet_v1_1.0_224' _UpperCamelCase : str = 'tabby, tabby cat' _UpperCamelCase : Any = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __UpperCAmelCase ( A : Tuple , A : Dict , A : int=None ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = {} if isinstance(A , A ): UpperCAmelCase_ : Optional[Any] = model.mobilenet_va else: UpperCAmelCase_ : Optional[int] = model UpperCAmelCase_ : Dict = '''MobilenetV1/Conv2d_0/''' UpperCAmelCase_ : List[str] = backbone.conv_stem.convolution.weight UpperCAmelCase_ : int = backbone.conv_stem.normalization.bias UpperCAmelCase_ : int = backbone.conv_stem.normalization.weight UpperCAmelCase_ : List[str] = backbone.conv_stem.normalization.running_mean UpperCAmelCase_ : Dict = backbone.conv_stem.normalization.running_var for i in range(1_3 ): UpperCAmelCase_ : Union[str, Any] = i + 1 UpperCAmelCase_ : int = i * 2 UpperCAmelCase_ : List[Any] = backbone.layer[pt_index] UpperCAmelCase_ : str = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" UpperCAmelCase_ : List[Any] = pointer.convolution.weight UpperCAmelCase_ : List[Any] = pointer.normalization.bias UpperCAmelCase_ : str = pointer.normalization.weight UpperCAmelCase_ : Any = pointer.normalization.running_mean UpperCAmelCase_ : Any = pointer.normalization.running_var UpperCAmelCase_ : Tuple = backbone.layer[pt_index + 1] UpperCAmelCase_ : Tuple = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" UpperCAmelCase_ : int = pointer.convolution.weight UpperCAmelCase_ : int = pointer.normalization.bias UpperCAmelCase_ : List[str] = pointer.normalization.weight UpperCAmelCase_ : Union[str, Any] = pointer.normalization.running_mean UpperCAmelCase_ : Optional[int] = pointer.normalization.running_var if isinstance(A , A ): UpperCAmelCase_ : Union[str, Any] = '''MobilenetV1/Logits/Conv2d_1c_1x1/''' UpperCAmelCase_ : List[str] = model.classifier.weight UpperCAmelCase_ : List[Any] = model.classifier.bias return tf_to_pt_map def __UpperCAmelCase ( A : Dict , A : Any , A : List[str] ) -> List[str]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( '''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see ''' '''https://www.tensorflow.org/install/ for installation instructions.''' ) raise # Load weights from TF model UpperCAmelCase_ : int = tf.train.list_variables(A ) UpperCAmelCase_ : Any = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) UpperCAmelCase_ : List[str] = tf.train.load_variable(A , A ) UpperCAmelCase_ : int = array # Build TF to PyTorch weights loading map UpperCAmelCase_ : int = _build_tf_to_pytorch_map(A , A , A ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue UpperCAmelCase_ : List[Any] = tf_weights[name] if "depthwise_weights" in name: logger.info('''Transposing depthwise''' ) UpperCAmelCase_ : Optional[int] = np.transpose(A , (2, 3, 0, 1) ) elif "weights" in name: logger.info('''Transposing''' ) if len(pointer.shape ) == 2: # copying into linear layer UpperCAmelCase_ : Dict = array.squeeze().transpose() else: UpperCAmelCase_ : List[str] = np.transpose(A , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) UpperCAmelCase_ : Dict = torch.from_numpy(A ) tf_weights.pop(A , A ) tf_weights.pop(name + '''/RMSProp''' , A ) tf_weights.pop(name + '''/RMSProp_1''' , A ) tf_weights.pop(name + '''/ExponentialMovingAverage''' , A ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def __UpperCAmelCase ( A : torch.Tensor , A : nn.Convad ) -> torch.Tensor: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = features.shape[-2:] UpperCAmelCase_ , UpperCAmelCase_ : List[str] = conv_layer.stride UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = conv_layer.kernel_size if in_height % stride_height == 0: UpperCAmelCase_ : Optional[int] = max(kernel_height - stride_height , 0 ) else: UpperCAmelCase_ : Union[str, Any] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: UpperCAmelCase_ : Optional[int] = max(kernel_width - stride_width , 0 ) else: UpperCAmelCase_ : Union[str, Any] = max(kernel_width - (in_width % stride_width) , 0 ) UpperCAmelCase_ : List[Any] = pad_along_width // 2 UpperCAmelCase_ : List[Any] = pad_along_width - pad_left UpperCAmelCase_ : Any = pad_along_height // 2 UpperCAmelCase_ : Optional[int] = pad_along_height - pad_top UpperCAmelCase_ : List[Any] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(A , A , '''constant''' , 0.0 ) class snake_case__ ( nn.Module): def __init__( self : List[Any] , _A : MobileNetVaConfig , _A : int , _A : int , _A : int , _A : Optional[int] = 1 , _A : Optional[int] = 1 , _A : bool = False , _A : Optional[bool] = True , _A : Optional[bool or str] = True , ) -> None: super().__init__() UpperCAmelCase_ : Dict = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups." 
) UpperCAmelCase_ : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) UpperCAmelCase_ : List[str] = nn.Convad( in_channels=_A , out_channels=_A , kernel_size=_A , stride=_A , padding=_A , groups=_A , bias=_A , padding_mode='''zeros''' , ) if use_normalization: UpperCAmelCase_ : Any = nn.BatchNormad( num_features=_A , eps=config.layer_norm_eps , momentum=0.9_997 , affine=_A , track_running_stats=_A , ) else: UpperCAmelCase_ : Tuple = None if use_activation: if isinstance(_A , _A ): UpperCAmelCase_ : str = ACTaFN[use_activation] elif isinstance(config.hidden_act , _A ): UpperCAmelCase_ : Optional[Any] = ACTaFN[config.hidden_act] else: UpperCAmelCase_ : Any = config.hidden_act else: UpperCAmelCase_ : Optional[int] = None def A ( self : Any , _A : torch.Tensor ) -> torch.Tensor: if self.config.tf_padding: UpperCAmelCase_ : Optional[Any] = apply_tf_padding(_A , self.convolution ) UpperCAmelCase_ : int = self.convolution(_A ) if self.normalization is not None: UpperCAmelCase_ : Optional[Any] = self.normalization(_A ) if self.activation is not None: UpperCAmelCase_ : List[str] = self.activation(_A ) return features class snake_case__ ( UpperCamelCase): a_ = MobileNetVaConfig a_ = load_tf_weights_in_mobilenet_va a_ = "mobilenet_v1" a_ = "pixel_values" a_ = False def A ( self : Union[str, Any] , _A : Union[nn.Linear, nn.Convad] ) -> None: if isinstance(_A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_A , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _UpperCamelCase : List[Any] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _UpperCamelCase : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." 
, UpperCamelCase , ) class snake_case__ ( UpperCamelCase): def __init__( self : List[Any] , _A : MobileNetVaConfig , _A : bool = True ) -> List[str]: super().__init__(_A ) UpperCAmelCase_ : Union[str, Any] = config UpperCAmelCase_ : Any = 32 UpperCAmelCase_ : List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) UpperCAmelCase_ : Tuple = MobileNetVaConvLayer( _A , in_channels=config.num_channels , out_channels=_A , kernel_size=3 , stride=2 , ) UpperCAmelCase_ : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] UpperCAmelCase_ : Optional[Any] = nn.ModuleList() for i in range(13 ): UpperCAmelCase_ : Optional[Any] = out_channels if strides[i] == 2 or i == 0: depth *= 2 UpperCAmelCase_ : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( _A , in_channels=_A , out_channels=_A , kernel_size=3 , stride=strides[i] , groups=_A , ) ) self.layer.append( MobileNetVaConvLayer( _A , in_channels=_A , out_channels=_A , kernel_size=1 , ) ) UpperCAmelCase_ : List[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def A ( self : Union[str, Any] , _A : Optional[int] ) -> Optional[int]: raise NotImplementedError @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A ( self : List[str] , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: UpperCAmelCase_ : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('''You have to specify pixel_values''' ) UpperCAmelCase_ : Optional[int] = self.conv_stem(_A ) UpperCAmelCase_ : Any = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): UpperCAmelCase_ : Optional[Any] = layer_module(_A ) if output_hidden_states: UpperCAmelCase_ : Union[str, Any] = all_hidden_states + (hidden_states,) UpperCAmelCase_ : int = hidden_states if self.pooler is not None: UpperCAmelCase_ : Dict = torch.flatten(self.pooler(_A ) , start_dim=1 ) else: UpperCAmelCase_ : Tuple = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=_A , ) @add_start_docstrings( "\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , UpperCamelCase , ) class snake_case__ ( UpperCamelCase): def __init__( self : int , _A : MobileNetVaConfig ) -> None: super().__init__(_A ) UpperCAmelCase_ : int = config.num_labels UpperCAmelCase_ : str = MobileNetVaModel(_A ) UpperCAmelCase_ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head UpperCAmelCase_ : Optional[int] = nn.Dropout(config.classifier_dropout_prob , inplace=_A ) UpperCAmelCase_ : Optional[int] = nn.Linear(_A , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A ( self : Any , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: UpperCAmelCase_ : Any = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : Union[str, Any] = self.mobilenet_va(_A , output_hidden_states=_A , return_dict=_A ) UpperCAmelCase_ : Tuple = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase_ : Tuple = self.classifier(self.dropout(_A ) ) UpperCAmelCase_ : Any = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase_ : List[str] = '''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase_ : Optional[int] = '''single_label_classification''' else: UpperCAmelCase_ : Union[str, Any] = '''multi_label_classification''' if self.config.problem_type == "regression": UpperCAmelCase_ : str = MSELoss() if self.num_labels == 1: UpperCAmelCase_ : str = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase_ : str = loss_fct(_A , _A ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase_ : Optional[int] = CrossEntropyLoss() UpperCAmelCase_ : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase_ : Dict = BCEWithLogitsLoss() UpperCAmelCase_ : Optional[int] = loss_fct(_A , _A ) if not return_dict: UpperCAmelCase_ : List[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=_A , logits=_A , hidden_states=outputs.hidden_states , )
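# Standalone sketch of the TensorFlow-style "SAME" padding that
# apply_tf_padding computes above, reduced to one spatial dimension:
def same_pad(in_size: int, stride: int, kernel: int) -> tuple[int, int]:
    if in_size % stride == 0:
        pad = max(kernel - stride, 0)
    else:
        pad = max(kernel - (in_size % stride), 0)
    return pad // 2, pad - pad // 2  # (before, after)

assert same_pad(7, 2, 3) == (1, 1)
assert same_pad(8, 2, 3) == (0, 1)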
'''simple docstring''' def __UpperCAmelCase ( A : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('''The given input must be positive''' ) # get the generated string sequence UpperCAmelCase_ : int = gray_code_sequence_string(A ) # # convert them to integers for i in range(len(A ) ): UpperCAmelCase_ : List[str] = int(sequence[i] , 2 ) return sequence def __UpperCAmelCase ( A : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] UpperCAmelCase_ : Tuple = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits UpperCAmelCase_ : List[str] = gray_code_sequence_string(bit_count - 1 ) UpperCAmelCase_ : int = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): UpperCAmelCase_ : Union[str, Any] = '''0''' + smaller_sequence[i] sequence.append(A ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): UpperCAmelCase_ : Dict = '''1''' + smaller_sequence[i] sequence.append(A ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
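# Compact equivalent of the reflected Gray code construction above, with the
# names its recursive call sites use; for 2 bits the sequence is 00, 01, 11, 10:
def gray_code_sequence_string(bit_count: int) -> list[str]:
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    smaller = gray_code_sequence_string(bit_count - 1)
    return ["0" + s for s in smaller] + ["1" + s for s in reversed(smaller)]

assert [int(s, 2) for s in gray_code_sequence_string(2)] == [0, 1, 3, 2]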
'''simple docstring''' import logging from transformers.configuration_utils import PretrainedConfig _UpperCamelCase : Any = logging.getLogger(__name__) class snake_case__ ( UpperCamelCase): a_ = "masked_bert" def __init__( self : str , _A : Dict=3_05_22 , _A : Dict=7_68 , _A : Union[str, Any]=12 , _A : str=12 , _A : str=30_72 , _A : Dict="gelu" , _A : int=0.1 , _A : Optional[Any]=0.1 , _A : Any=5_12 , _A : Union[str, Any]=2 , _A : Union[str, Any]=0.02 , _A : int=1e-12 , _A : Any=0 , _A : Any="topK" , _A : List[str]="constant" , _A : Dict=0.0 , **_A : int , ) -> Union[str, Any]: super().__init__(pad_token_id=_A , **_A ) UpperCAmelCase_ : Union[str, Any] = vocab_size UpperCAmelCase_ : str = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : List[str] = type_vocab_size UpperCAmelCase_ : str = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps UpperCAmelCase_ : Optional[int] = pruning_method UpperCAmelCase_ : Optional[int] = mask_init UpperCAmelCase_ : List[Any] = mask_scale
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging _UpperCamelCase : Tuple = logging.get_logger(__name__) class snake_case__ ( UpperCamelCase): a_ = ["audio_values", "audio_mask"] def __init__( self : Any , _A : int=20_48 , _A : str=1 , _A : str=[16, 16] , _A : Union[str, Any]=1_28 , _A : Optional[Any]=4_41_00 , _A : Dict=86 , _A : str=20_48 , _A : Optional[int]=0.0 , **_A : Any , ) -> str: super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , **_A , ) UpperCAmelCase_ : Optional[int] = spectrogram_length UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : Any = feature_size // self.patch_size[1] UpperCAmelCase_ : Dict = n_fft UpperCAmelCase_ : Any = sampling_rate // hop_length_to_sampling_rate UpperCAmelCase_ : Optional[Any] = sampling_rate UpperCAmelCase_ : Optional[Any] = padding_value UpperCAmelCase_ : int = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ).T def A ( self : Optional[Any] , _A : np.array ) -> np.ndarray: UpperCAmelCase_ : Tuple = spectrogram( _A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , ) UpperCAmelCase_ : Dict = log_spec[:, :-1] UpperCAmelCase_ : List[Any] = log_spec - 20.0 UpperCAmelCase_ : Dict = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Optional[int] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = True , _A : Optional[int] = None , _A : bool = False , _A : bool = False , **_A : Optional[int] , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled" F" with {self.sampling_rate} and not {sampling_rate}." ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) UpperCAmelCase_ : List[str] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}" ) UpperCAmelCase_ : Optional[int] = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase_ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): UpperCAmelCase_ : List[Any] = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase_ : Any = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis UpperCAmelCase_ : Dict = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , _A ): UpperCAmelCase_ : List[Any] = [np.asarray(_A , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask UpperCAmelCase_ : Optional[int] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: UpperCAmelCase_ : int = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] UpperCAmelCase_ : Any = np.array(_A ).astype(np.floataa ) # convert into correct format for padding UpperCAmelCase_ : Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch UpperCAmelCase_ : Dict = np.ones([len(_A ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) UpperCAmelCase_ : str = padded_audio_features * self.padding_value for i in range(len(_A ) ): UpperCAmelCase_ : Dict = audio_features[i] UpperCAmelCase_ : int = feature # return as BatchFeature if return_attention_mask: UpperCAmelCase_ : Optional[Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: UpperCAmelCase_ : int = {'''audio_values''': padded_audio_features} UpperCAmelCase_ : List[Any] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
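# Numeric sketch of the dB normalization in _np_extract_fbank_features above:
# with db_range=80 the spectrogram spans at most 80 dB below its peak, and the
# shift/scale/clip maps that range roughly into [-1, 1]:
import numpy as np

log_spec = np.array([-80.0, -40.0, 0.0])  # example dB values
out = np.clip((log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0
print(out)  # [-1.  -0.5  0.5]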
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = StableDiffusionDiffEditPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} a_ = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([]) def A ( self : Tuple ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_ : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase_ : Optional[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A ) UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase_ : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''inverse_scheduler''': inverse_scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def A ( self : str , _A : List[str] , _A : Any=0 ) -> str: UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Any = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : str = { '''prompt''': '''a dog and a newt''', '''mask_image''': mask, '''image_latents''': latents, '''generator''': generator, 
'''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Dict = torch.manual_seed(_A ) else: UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[Any] = { '''image''': image, '''source_prompt''': '''a cat and a frog''', '''target_prompt''': '''a dog and a newt''', '''generator''': generator, '''num_inference_steps''': 2, '''num_maps_per_mask''': 2, '''mask_encode_strength''': 1.0, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any: UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ) if str(_A ).startswith('''mps''' ): UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A ) else: UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase_ : Optional[int] = { '''image''': image, '''prompt''': '''a cat and a frog''', '''generator''': generator, '''num_inference_steps''': 2, '''inpaint_strength''': 1.0, '''guidance_scale''': 6.0, '''decode_latents''': True, '''output_type''': '''numpy''', } return inputs def A ( self : List[str] ) -> Optional[Any]: if not hasattr(self.pipeline_class , '''_optional_components''' ): return UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : Any = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(_A , _A , _A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A ) UpperCAmelCase_ : str = pipe(**_A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_A ) UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A ) pipe_loaded.to(_A ) pipe_loaded.set_progress_bar_config(disable=_A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." 
, ) UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A ) UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0] UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max() self.assertLess(_A , 1e-4 ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : Optional[Any] = '''cpu''' UpperCAmelCase_ : Any = self.get_dummy_components() UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A ) UpperCAmelCase_ : int = pipe.generate_mask(**_A ) UpperCAmelCase_ : Tuple = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase_ : List[Any] = np.array([0] * 9 ) UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def A ( self : str ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = '''cpu''' UpperCAmelCase_ : str = self.get_dummy_components() UpperCAmelCase_ : str = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : int = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) def A ( self : Tuple ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : Any = '''cpu''' UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components() UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''} UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A ) UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A ) UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A ) UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase_ : List[Any] = np.array( [0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , ) UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_A , 1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase): def A ( self : Optional[Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def A ( cls : Dict ) -> List[Any]: UpperCAmelCase_ : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' ) UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) ) UpperCAmelCase_ : Any = raw_image def A ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : int = torch.manual_seed(0 ) UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Tuple = '''a bowl of pears''' UpperCAmelCase_ : Optional[int] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[str] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents UpperCAmelCase_ : Any = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : str = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1 def A ( self : Tuple ) -> List[str]: UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa ) UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit''' UpperCAmelCase_ : Dict = '''a bowl of pears''' UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask( image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , ) UpperCAmelCase_ : List[Any] = pipe.invert( prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents UpperCAmelCase_ : Dict = pipe( prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0] UpperCAmelCase_ : Tuple = ( np.array( load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/diffedit/pears.png''' ).resize((7_68, 7_68) ) ) / 2_55 ) assert np.abs((expected_image - image).max() ) < 5e-1
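# Condensed, hedged sketch of the three DiffEdit stages the integration tests
# above exercise (mask generation, DDIM inversion, masked denoising); the model
# id, image URL, prompts and seed are taken from the tests themselves:
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
generator = torch.manual_seed(0)
mask = pipe.generate_mask(
    image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears", generator=generator
)
latents = pipe.invert(
    prompt="a bowl of fruit", image=image, inpaint_strength=0.7, generator=generator
).latents
edited = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents,
    generator=generator, inpaint_strength=0.7, output_type="numpy",
).images[0]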
'''simple docstring''' def __UpperCAmelCase ( A : list ) -> list: def merge(A : list , A : list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(A ) <= 1: return collection UpperCAmelCase_ : Union[str, Any] = len(A ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip() _UpperCamelCase : Tuple = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
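# De-obfuscated sketch of the generator-based merge step used above; `_merge`
# drains whichever list currently has the smaller head, then yields the rest:
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))

assert merge_sort([5, 2, 9, 1]) == [1, 2, 5, 9]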
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case__ ( UpperCamelCase): def A ( self : List[str] ) -> List[Any]: UpperCAmelCase_ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(_A , '''num_heads''' ) ) class snake_case__ : def __init__( self : List[Any] , _A : List[str] , _A : Optional[Any]=13 , _A : List[str]=64 , _A : Tuple=3 , _A : int=[16, 48, 96] , _A : int=[1, 3, 6] , _A : Union[str, Any]=[1, 2, 10] , _A : List[Any]=[7, 3, 3] , _A : Optional[Any]=[4, 2, 2] , _A : List[Any]=[2, 1, 1] , _A : Union[str, Any]=[2, 2, 2] , _A : Tuple=[False, False, True] , _A : str=[0.0, 0.0, 0.0] , _A : List[Any]=0.02 , _A : int=1e-12 , _A : Optional[int]=True , _A : List[str]=True , _A : Union[str, Any]=2 , ) -> List[Any]: UpperCAmelCase_ : int = parent UpperCAmelCase_ : List[Any] = batch_size UpperCAmelCase_ : Any = image_size UpperCAmelCase_ : Tuple = patch_sizes UpperCAmelCase_ : int = patch_stride UpperCAmelCase_ : Any = patch_padding UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Union[str, Any] = num_labels UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Optional[int] = num_heads UpperCAmelCase_ : Tuple = stride_kv UpperCAmelCase_ : Optional[Any] = depth UpperCAmelCase_ : Dict = cls_token UpperCAmelCase_ : Dict = attention_drop_rate UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : List[str] = layer_norm_eps def A ( self : int ) -> List[str]: UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : List[str] = self.get_config() return config, pixel_values, labels def A ( self : List[str] ) -> int: return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def A ( self : Dict , _A : List[Any] , _A : Tuple , _A : Optional[Any] ) -> List[str]: UpperCAmelCase_ : List[Any] = CvtModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase_ : Tuple = model(_A ) UpperCAmelCase_ : List[str] = (self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_ : int = floor(((height + 2 * self.patch_padding[i] - 
self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_ : Optional[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def A ( self : Any , _A : int , _A : str , _A : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : str = CvtForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase_ : int = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Dict ) -> Any: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = config_and_inputs UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): a_ = (CvtModel, CvtForImageClassification) if is_torch_available() else () a_ = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False a_ = False def A ( self : int ) -> List[str]: UpperCAmelCase_ : Optional[int] = CvtModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def A ( self : Any ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : int ) -> List[str]: return @unittest.skip(reason='''Cvt does not output attentions''' ) def A ( self : Optional[int] ) -> Optional[int]: pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def A ( self : Any ) -> Optional[Any]: pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def A ( self : List[Any] ) -> Any: pass def A ( self : int ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(_A ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Tuple = [*signature.parameters.keys()] UpperCAmelCase_ : str = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def A ( self : Tuple ) -> int: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def A ( self : Dict ) -> List[str]: def check_hidden_states_output(_A : Dict , _A : str , _A : int ): UpperCAmelCase_ : str = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) ) UpperCAmelCase_ : Optional[Any] = outputs.hidden_states UpperCAmelCase_ : Any = len(self.model_tester.depth ) self.assertEqual(len(_A ) , _A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, 
] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Dict = True check_hidden_states_output(_A , _A , _A ) def A ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def A ( self : List[Any] ) -> Optional[Any]: pass @slow def A ( self : Optional[int] ) -> int: for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case__ ( unittest.TestCase): @cached_property def A ( self : Union[str, Any] ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A ( self : str ) -> str: UpperCAmelCase_ : str = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_A ) UpperCAmelCase_ : Optional[int] = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Any = model(**_A ) # verify the logits UpperCAmelCase_ : Tuple = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_285, 0.9_015, -0.3_150] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
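# The spatial bookkeeping the CvT model tester mirrors is the standard conv
# output-size formula; with the tester defaults above (image_size=64,
# patch_size=7, padding=2, stride=4) stage 0 yields 16x16 feature maps:
from math import floor

def conv_out(size: int, pad: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * pad - kernel) / stride) + 1

assert conv_out(64, 2, 7, 4) == 16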
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class snake_case__ : def __init__( self : Dict , _A : Optional[int] , _A : List[Any]=13 , _A : str=30 , _A : int=2 , _A : List[Any]=3 , _A : List[Any]=True , _A : List[Any]=True , _A : Any=32 , _A : int=2 , _A : List[str]=4 , _A : List[Any]=37 , _A : Any="gelu" , _A : List[str]=0.1 , _A : int=0.1 , _A : Optional[int]=10 , _A : List[str]=0.02 , _A : str=3 , _A : str=None , ) -> List[Any]: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : str = patch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : int = is_training UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : int = hidden_size UpperCAmelCase_ : List[Any] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Dict = intermediate_size UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Optional[int] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : int = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : str = (image_size // patch_size) ** 2 UpperCAmelCase_ : Optional[int] = num_patches + 1 def A ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Tuple = self.get_config() return config, pixel_values, labels def A ( self : List[Any] ) -> Optional[Any]: return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , ) def A ( self : Any , _A : Optional[Any] , _A : Tuple , _A : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = TFViTModel(config=_A ) UpperCAmelCase_ : Dict = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
        UpperCAmelCase_ : Optional[int] = self.image_size // 2
        UpperCAmelCase_ : List[str] = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase_ : Optional[Any] = model(_A , interpolate_pos_encoding=_A , training=_A )
        UpperCAmelCase_ : Optional[int] = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def A ( self : Union[str, Any] , _A : str , _A : Tuple , _A : List[str] ) -> Dict:
        UpperCAmelCase_ : Optional[int] = self.type_sequence_label_size
        UpperCAmelCase_ : Dict = TFViTForImageClassification(_A )
        UpperCAmelCase_ : Tuple = model(_A , labels=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # Test with an image with different size than the one specified in config.
        UpperCAmelCase_ : List[str] = self.image_size // 2
        UpperCAmelCase_ : List[str] = pixel_values[:, :, :image_size, :image_size]
        UpperCAmelCase_ : List[Any] = model(_A , interpolate_pos_encoding=_A , training=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        UpperCAmelCase_ : Dict = 1
        UpperCAmelCase_ : List[Any] = TFViTForImageClassification(_A )
        UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : Any = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def A ( self : List[str] ) -> Optional[Any]:
        UpperCAmelCase_ : str = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs
        UpperCAmelCase_ : Optional[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    a_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    a_ = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False

    def A ( self : List[str] ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = TFViTModelTester(self )
        UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def A ( self : Any ) -> List[str]:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def A ( self : str ) -> str:
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def A ( self : List[str] ) -> Any:
        pass

    def A ( self : Tuple ) -> Dict:
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : List[str] = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCAmelCase_ : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )

    def A ( self : List[str] ) -> List[str]:
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[Any] = model_class(_A )
            UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
            UpperCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def A ( self : Union[str, Any] ) -> Tuple:
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def A ( self : List[str] ) -> Optional[int]:
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_A )

    @slow
    def A ( self : Tuple ) -> Tuple:
        UpperCAmelCase_ : str = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(_A )


def __UpperCAmelCase ( ) -> List[str]:
    UpperCAmelCase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_tf
@require_vision
class snake_case__ ( unittest.TestCase):
    @cached_property
    def A ( self : int ) -> Union[str, Any]:
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def A ( self : Dict ) -> int:
        UpperCAmelCase_ : Tuple = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        UpperCAmelCase_ : Union[str, Any] = self.default_image_processor
        UpperCAmelCase_ : Dict = prepare_img()
        UpperCAmelCase_ : List[Any] = image_processor(images=_A , return_tensors='''tf''' )

        # forward pass
        UpperCAmelCase_ : Optional[int] = model(**_A )

        # verify the logits
        UpperCAmelCase_ : Any = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , _A )
        UpperCAmelCase_ : Tuple = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1e-4 )
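# A minimal sketch (not part of the test file above) of the behaviour these tests
# exercise: running a TF ViT checkpoint at a resolution other than the 224x224 it
# was trained at, via interpolate_pos_encoding. The shape in the comment follows
# the test's own formula (size // patch_size) ** 2 + 1.
import tensorflow as tf
from transformers import TFViTModel

vit = TFViTModel.from_pretrained("google/vit-base-patch16-224")
pixels = tf.random.uniform((1, 3, 112, 112))  # half the pretraining resolution
out = vit(pixels, interpolate_pos_encoding=True, training=False)
print(out.last_hidden_state.shape)  # (1, 50, 768): (112 // 16) ** 2 + 1 tokens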
304
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=UpperCamelCase)
class snake_case__ ( UpperCamelCase):
    a_ = field(default="language-modeling" , metadata={"include_in_asdict_even_if_is_default": True})
    a_ = Features({"text": Value("string")})
    a_ = Features({})
    a_ = "text"

    @property
    def A ( self : List[str] ) -> Dict[str, str]:
        return {self.text_column: "text"}
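# A small usage sketch, assuming the class above is the datasets library's
# LanguageModeling task template: instantiating it exposes the column mapping
# used to rename dataset columns to the canonical "text" column.
from datasets.tasks import LanguageModeling

task = LanguageModeling(text_column="content")
print(task.column_mapping)  # {'content': 'text'}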
304
1
'''simple docstring'''
import unittest

from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class snake_case__ :
        @staticmethod
        def A ( *_A : Union[str, Any] , **_A : str ) -> int:
            pass


@is_pipeline_test
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase):
    a_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def A ( self : Dict , _A : List[Any] , _A : int , _A : int ) -> Any:
        UpperCAmelCase_ : List[Any] = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        UpperCAmelCase_ : Optional[int] = [
            {
                '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''question''': '''How many cats are there?''',
            },
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''question''': '''How many cats are there?''',
            },
        ]
        return vqa_pipeline, examples

    def A ( self : List[Any] , _A : int , _A : List[Any] ) -> Optional[int]:
        UpperCAmelCase_ : Optional[Any] = vqa_pipeline(_A , top_k=1 )
        self.assertEqual(
            _A ,
            [
                [{'''score''': ANY(_A ), '''answer''': ANY(_A )}],
                [{'''score''': ANY(_A ), '''answer''': ANY(_A )}],
            ] ,
        )

    @require_torch
    def A ( self : str ) -> List[Any]:
        UpperCAmelCase_ : Tuple = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' )
        UpperCAmelCase_ : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        UpperCAmelCase_ : int = '''How many cats are there?'''

        UpperCAmelCase_ : int = vqa_pipeline(image=_A , question='''How many cats are there?''' , top_k=2 )
        self.assertEqual(
            _A , [{'''score''': ANY(_A ), '''answer''': ANY(_A )}, {'''score''': ANY(_A ), '''answer''': ANY(_A )}] )

        UpperCAmelCase_ : Optional[int] = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            _A , [{'''score''': ANY(_A ), '''answer''': ANY(_A )}, {'''score''': ANY(_A ), '''answer''': ANY(_A )}] )

    @slow
    @require_torch
    def A ( self : Tuple ) -> Optional[int]:
        UpperCAmelCase_ : Optional[Any] = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' )
        UpperCAmelCase_ : Optional[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        UpperCAmelCase_ : Union[str, Any] = '''How many cats are there?'''

        UpperCAmelCase_ : List[Any] = vqa_pipeline(image=_A , question=_A , top_k=2 )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) ,
            [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )

        UpperCAmelCase_ : Tuple = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) ,
            [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] )

        UpperCAmelCase_ : int = vqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(_A , decimals=4 ) ,
            [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 ,
        )

    @require_tf
    @unittest.skip('''Visual question answering not implemented in TF''' )
    def A ( self : List[Any] ) -> List[Any]:
        pass
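# A minimal usage sketch of the pipeline exercised above; the checkpoint and the
# fixture path come from the slow test, and the commented scores are the values
# that test asserts.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
print(preds)  # e.g. [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]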
304
'''simple docstring'''
import json
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from transformers import OneFormerImageProcessor
    from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
    from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput

if is_vision_available():
    from PIL import Image


def __UpperCAmelCase ( A : int , A : Any="shi-labs/oneformer_demo" ) -> Dict:
    with open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) as f:
        UpperCAmelCase_ : Union[str, Any] = json.load(A )
    UpperCAmelCase_ : Optional[int] = {}
    UpperCAmelCase_ : List[str] = []
    UpperCAmelCase_ : str = []
    for key, info in class_info.items():
        UpperCAmelCase_ : Tuple = info['''name''']
        class_names.append(info['''name'''] )
        if info["isthing"]:
            thing_ids.append(int(A ) )
    UpperCAmelCase_ : Any = thing_ids
    UpperCAmelCase_ : Union[str, Any] = class_names
    return metadata


class snake_case__ ( unittest.TestCase):
    def __init__( self : Any , _A : str , _A : Optional[int]=7 , _A : Tuple=3 , _A : Tuple=30 , _A : List[Any]=4_00 , _A : Tuple=None , _A : Optional[Any]=True , _A : Optional[Any]=True , _A : Any=[0.5, 0.5, 0.5] , _A : Any=[0.5, 0.5, 0.5] , _A : List[str]=10 , _A : Optional[int]=False , _A : Union[str, Any]=2_55 , _A : List[Any]="shi-labs/oneformer_demo" , _A : str="ade20k_panoptic.json" , _A : List[Any]=10 , ) -> Any:
        UpperCAmelCase_ : List[str] = parent
        UpperCAmelCase_ : Optional[Any] = batch_size
        UpperCAmelCase_ : Optional[Any] = num_channels
        UpperCAmelCase_ : Tuple = min_resolution
        UpperCAmelCase_ : Optional[int] = max_resolution
        UpperCAmelCase_ : Dict = do_resize
        UpperCAmelCase_ : Tuple = {'''shortest_edge''': 32, '''longest_edge''': 13_33} if size is None else size
        UpperCAmelCase_ : int = do_normalize
        UpperCAmelCase_ : List[Any] = image_mean
        UpperCAmelCase_ : Dict = image_std
        UpperCAmelCase_ : str = class_info_file
        UpperCAmelCase_ : Optional[Any] = prepare_metadata(_A , _A )
        UpperCAmelCase_ : Tuple = num_text
        UpperCAmelCase_ : Union[str, Any] = repo_path

        # for the post_process_functions
        UpperCAmelCase_ : Any = 2
        UpperCAmelCase_ : Dict = 10
        UpperCAmelCase_ : int = 10
        UpperCAmelCase_ : Optional[Any] = 3
        UpperCAmelCase_ : str = 4
        UpperCAmelCase_ : int = num_labels
        UpperCAmelCase_ : Union[str, Any] = do_reduce_labels
        UpperCAmelCase_ : str = ignore_index

    def A ( self : Dict ) -> List[Any]:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def A ( self : Any , _A : List[Any] , _A : List[str]=False ) -> Optional[Any]:
        if not batched:
            UpperCAmelCase_ : Any = image_inputs[0]
            if isinstance(_A , Image.Image ):
                UpperCAmelCase_ , UpperCAmelCase_ : Dict = image.size
            else:
                UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
            if w < h:
                UpperCAmelCase_ : Union[str, Any] = int(self.size['''shortest_edge'''] * h / w )
                UpperCAmelCase_ : int = self.size['''shortest_edge''']
            elif w > h:
                UpperCAmelCase_ : List[Any] = self.size['''shortest_edge''']
                UpperCAmelCase_ : Any = int(self.size['''shortest_edge'''] * w / h )
            else:
                UpperCAmelCase_ : Dict = self.size['''shortest_edge''']
                UpperCAmelCase_ : str = self.size['''shortest_edge''']
        else:
            UpperCAmelCase_ : Dict = []
            for image in image_inputs:
                UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            UpperCAmelCase_ : int = max(_A , key=lambda _A : item[0] )[0]
            UpperCAmelCase_ : List[str] = max(_A , key=lambda _A : item[1] )[1]
        return expected_height, expected_width

    def A ( self : Tuple ) -> str:
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) ,
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) ,
        )


@require_torch
@require_vision
class snake_case__ ( UpperCamelCase , unittest.TestCase):
    a_ = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    a_ = image_processing_class

    def A ( self : Optional[int] ) -> Any:
        UpperCAmelCase_ : int = OneFormerImageProcessorTester(self )

    @property
    def A ( self : Any ) -> int:
        return self.image_processing_tester.prepare_image_processor_dict()

    def A ( self : Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''image_mean''' ) )
        self.assertTrue(hasattr(_A , '''image_std''' ) )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''do_resize''' ) )
        self.assertTrue(hasattr(_A , '''size''' ) )
        self.assertTrue(hasattr(_A , '''ignore_index''' ) )
        self.assertTrue(hasattr(_A , '''class_info_file''' ) )
        self.assertTrue(hasattr(_A , '''num_text''' ) )
        self.assertTrue(hasattr(_A , '''repo_path''' ) )
        self.assertTrue(hasattr(_A , '''metadata''' ) )
        self.assertTrue(hasattr(_A , '''do_reduce_labels''' ) )

    def A ( self : Dict ) -> Dict:
        pass

    def A ( self : Tuple ) -> Dict:
        # Initialize image_processor
        UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )

        # Test not batched input
        UpperCAmelCase_ : str = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : int = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def A ( self : Tuple ) -> Tuple:
        # Initialize image_processor
        UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )

        # Test not batched input
        UpperCAmelCase_ : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : Tuple = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def A ( self : Dict ) -> Union[str, Any]:
        # Initialize image_processor
        UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )

        # Test not batched input
        UpperCAmelCase_ : int = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_expected_values(_A )
        self.assertEqual(
            encoded_images.shape ,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width) ,
        )

        # Test batched
        UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_A , batched=_A )
        UpperCAmelCase_ : Optional[int] = image_processor(
            _A , ['''semantic'''] * len(_A ) , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) ,
        )

    def A ( self : int , _A : Any=False , _A : List[Any]=False , _A : Any="np" ) -> str:
        UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        UpperCAmelCase_ : Tuple = self.image_processing_tester.num_labels
        UpperCAmelCase_ : int = None
        UpperCAmelCase_ : Union[str, Any] = None
        UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_A )
        if with_segmentation_maps:
            UpperCAmelCase_ : Any = num_labels
            if is_instance_map:
                UpperCAmelCase_ : Any = list(range(_A ) ) * 2
                UpperCAmelCase_ : Optional[Any] = dict(enumerate(_A ) )
            UpperCAmelCase_ : Dict = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta )
                for img in image_inputs
            ]
            if segmentation_type == "pil":
                UpperCAmelCase_ : Dict = [Image.fromarray(_A ) for annotation in annotations]

        UpperCAmelCase_ : Tuple = image_processor(
            _A ,
            ['''semantic'''] * len(_A ) ,
            _A ,
            return_tensors='''pt''' ,
            instance_id_to_semantic_id=_A ,
            pad_and_return_pixel_mask=_A ,
        )
        return inputs

    def A ( self : int ) -> str:
        pass

    def A ( self : Tuple ) -> Union[str, Any]:
        def common(_A : Optional[int]=False , _A : str=None ):
            UpperCAmelCase_ : List[str] = self.comm_get_image_processor_inputs(
                with_segmentation_maps=_A , is_instance_map=_A , segmentation_type=_A )

            UpperCAmelCase_ : List[Any] = inputs['''mask_labels''']
            UpperCAmelCase_ : Optional[Any] = inputs['''class_labels''']
            UpperCAmelCase_ : int = inputs['''pixel_values''']
            UpperCAmelCase_ : Tuple = inputs['''text_inputs''']

            # check the batch_size
            for mask_label, class_label, text_input in zip(_A , _A , _A ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(_A ) , self.image_processing_tester.num_text )

        common()
        common(is_instance_map=_A )
        common(is_instance_map=_A , segmentation_type='''pil''' )
        common(is_instance_map=_A , segmentation_type='''pil''' )

    def A ( self : List[Any] ) -> List[Any]:
        UpperCAmelCase_ : int = np.zeros((20, 50) )
        UpperCAmelCase_ : List[str] = 1
        UpperCAmelCase_ : Dict = 1
        UpperCAmelCase_ : List[Any] = 1
        UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_A )
        self.assertEqual(len(_A ) , 4 )
        self.assertEqual(rle[0] , 21 )
        self.assertEqual(rle[1] , 45 )

    def A ( self : Any ) -> List[Any]:
        UpperCAmelCase_ : int = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,
            max_seq_length=77 ,
            task_seq_length=77 ,
            class_info_file='''ade20k_panoptic.json''' ,
            num_text=self.image_processing_tester.num_text ,
            repo_path='''shi-labs/oneformer_demo''' ,
        )
        UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : Union[str, Any] = fature_extractor.post_process_semantic_segmentation(_A )

        self.assertEqual(len(_A ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape ,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) ,
        )

        UpperCAmelCase_ : List[str] = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : Any = fature_extractor.post_process_semantic_segmentation(_A , target_sizes=_A )

        self.assertEqual(segmentation[0].shape , target_sizes[0] )

    def A ( self : Optional[Any] ) -> Tuple:
        UpperCAmelCase_ : Any = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,
            max_seq_length=77 ,
            task_seq_length=77 ,
            class_info_file='''ade20k_panoptic.json''' ,
            num_text=self.image_processing_tester.num_text ,
            repo_path='''shi-labs/oneformer_demo''' ,
        )
        UpperCAmelCase_ : Dict = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : List[Any] = image_processor.post_process_instance_segmentation(_A , threshold=0 )

        self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , _A )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )

    def A ( self : Optional[int] ) -> Union[str, Any]:
        UpperCAmelCase_ : Optional[Any] = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes ,
            max_seq_length=77 ,
            task_seq_length=77 ,
            class_info_file='''ade20k_panoptic.json''' ,
            num_text=self.image_processing_tester.num_text ,
            repo_path='''shi-labs/oneformer_demo''' ,
        )
        UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : List[Any] = image_processor.post_process_panoptic_segmentation(_A , threshold=0 )

        self.assertTrue(len(_A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue('''segmentation''' in el )
            self.assertTrue('''segments_info''' in el )
            self.assertEqual(type(el['''segments_info'''] ) , _A )
            self.assertEqual(
                el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
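# A self-contained sketch of the binary_mask_to_rle helper tested above. The
# printed value assumes the convention the test asserts: alternating 1-based
# run starts and run lengths over the flattened mask.
import numpy as np
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle

mask = np.zeros((20, 50))
mask[0, 20:] = 1  # a single hypothetical foreground strip
print(binary_mask_to_rle(mask))  # expected [21, 30]: ones start at pixel 21, run for 30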
304
1
'''simple docstring'''
ROMAN = [
    (1_000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, handling subtractive pairs like IV."""
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1_000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral by greedy decomposition over ROMAN."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
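# Usage sketch for the two converters above; "MMXXIV" is M + M + X + X + (V - I).
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"  # greedy decomposition over the ROMAN table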
304
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_UpperCamelCase : Optional[int] = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
_UpperCamelCase : List[str] = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_UpperCamelCase : Tuple = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_UpperCamelCase : str = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_UpperCamelCase : Optional[int] = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_UpperCamelCase : List[str] = [
    ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
    ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
    ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
    ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
    ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
    ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
    ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
    ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
    ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
    (
        'zero-shot-object-detection',
        'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
        'AutoModelForZeroShotObjectDetection',
    ),
    ('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
    ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
    ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
    ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
    (
        'table-question-answering',
        'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForTableQuestionAnswering',
    ),
    ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
    ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
    (
        'next-sentence-prediction',
        'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
        'AutoModelForNextSentencePrediction',
    ),
    (
        'audio-frame-classification',
        'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForAudioFrameClassification',
    ),
    ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
    (
        'document-question-answering',
        'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForDocumentQuestionAnswering',
    ),
    (
        'visual-question-answering',
        'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
        'AutoModelForVisualQuestionAnswering',
    ),
    ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
    (
        'zero-shot-image-classification',
        'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
        'AutoModelForZeroShotImageClassification',
    ),
    ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
    ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
    ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]


def __UpperCAmelCase ( A : Optional[int] ) -> int:
    UpperCAmelCase_ : Dict = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , A )
    return [m.group(0 ) for m in matches]


def __UpperCAmelCase ( ) -> str:
    UpperCAmelCase_ : Optional[int] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    UpperCAmelCase_ : Optional[Any] = {
        config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    UpperCAmelCase_ : Dict = collections.defaultdict(A )
    UpperCAmelCase_ : str = collections.defaultdict(A )
    UpperCAmelCase_ : int = collections.defaultdict(A )

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(A ):
        UpperCAmelCase_ : int = None
        if _re_tf_models.match(A ) is not None:
            UpperCAmelCase_ : Optional[Any] = tf_models
            UpperCAmelCase_ : Optional[int] = _re_tf_models.match(A ).groups()[0]
        elif _re_flax_models.match(A ) is not None:
            UpperCAmelCase_ : int = flax_models
            UpperCAmelCase_ : Any = _re_flax_models.match(A ).groups()[0]
        elif _re_pt_models.match(A ) is not None:
            UpperCAmelCase_ : Union[str, Any] = pt_models
            UpperCAmelCase_ : List[Any] = _re_pt_models.match(A ).groups()[0]

        if lookup_dict is not None:
            while len(A ) > 0:
                if attr_name in model_prefix_to_model_type:
                    UpperCAmelCase_ : Optional[int] = True
                    break
                # Try again after removing the last word in the name
                UpperCAmelCase_ : List[Any] = ''''''.join(camel_case_split(A )[:-1] )

    UpperCAmelCase_ : Tuple = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    UpperCAmelCase_ : List[Any] = list(A )
    all_models.sort()

    UpperCAmelCase_ : Dict = {'''model_type''': all_models}
    UpperCAmelCase_ : Tuple = [pt_models[t] for t in all_models]
    UpperCAmelCase_ : Dict = [tf_models[t] for t in all_models]
    UpperCAmelCase_ : Optional[int] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    UpperCAmelCase_ : int = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            UpperCAmelCase_ : Any = '''AutoProcessor'''
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            UpperCAmelCase_ : Union[str, Any] = '''AutoTokenizer'''
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            UpperCAmelCase_ : int = '''AutoFeatureExtractor'''
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            UpperCAmelCase_ : Dict = '''AutoTokenizer'''

    UpperCAmelCase_ : str = [processors[t] for t in all_models]
    return pd.DataFrame(A )


def __UpperCAmelCase ( A : Optional[int] ) -> str:
    UpperCAmelCase_ : int = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        UpperCAmelCase_ : Tuple = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        UpperCAmelCase_ : Tuple = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(A , A , A ):
            # The type of pipeline may not exist in this framework
            if not hasattr(A , A ):
                continue
            # First extract all model_names
            UpperCAmelCase_ : List[str] = []
            for name in getattr(A , A ).values():
                if isinstance(A , A ):
                    model_names.append(A )
                else:
                    model_names.extend(list(A ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table


def __UpperCAmelCase ( A : int , A : Any ) -> Tuple:
    UpperCAmelCase_ : Tuple = get_frameworks_table()
    UpperCAmelCase_ : Any = Dataset.from_pandas(A )

    UpperCAmelCase_ : str = hf_hub_download(
        '''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=A )
    UpperCAmelCase_ : Union[str, Any] = Dataset.from_json(A )
    UpperCAmelCase_ : Optional[int] = {
        tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
        for i in range(len(A ) )
    }
    UpperCAmelCase_ : str = update_pipeline_and_auto_class_table(A )

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    UpperCAmelCase_ : Union[str, Any] = sorted(table.keys() )
    UpperCAmelCase_ : Optional[Any] = pd.DataFrame(
        {
            '''model_class''': model_classes,
            '''pipeline_tag''': [table[m][0] for m in model_classes],
            '''auto_class''': [table[m][1] for m in model_classes],
        }
    )
    UpperCAmelCase_ : Dict = Dataset.from_pandas(A )

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(A , '''frameworks.json''' ) )
        tags_dataset.to_json(os.path.join(A , '''pipeline_tags.json''' ) )

        if commit_sha is not None:
            UpperCAmelCase_ : List[str] = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            UpperCAmelCase_ : int = '''Update'''

        upload_folder(
            repo_id='''huggingface/transformers-metadata''' ,
            folder_path=A ,
            repo_type='''dataset''' ,
            token=A ,
            commit_message=A ,
        )


def __UpperCAmelCase ( ) -> int:
    UpperCAmelCase_ : str = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    UpperCAmelCase_ : List[str] = transformers_module.pipelines.SUPPORTED_TASKS
    UpperCAmelCase_ : List[str] = []
    for key in pipeline_tasks:
        if key not in in_table:
            UpperCAmelCase_ : Optional[Any] = pipeline_tasks[key]['''pt''']
            if isinstance(A , (list, tuple) ):
                UpperCAmelCase_ : Dict = model[0]
            UpperCAmelCase_ : Any = model.__name__
            if model not in in_table.values():
                missing.append(A )

    if len(A ) > 0:
        UpperCAmelCase_ : List[Any] = ''', '''.join(A )
        raise ValueError(
            '''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
            F"`utils/update_metadata.py`: {msg}. Please add them!" )


if __name__ == "__main__":
    _UpperCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    _UpperCamelCase : Tuple = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
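# A small illustration of the camelcase splitter defined above (here called by
# the name its call sites use, camel_case_split): acronym prefixes such as "TF"
# stay grouped, which is how "TFBertModel" reduces to the "Bert" prefix.
print(camel_case_split("TFBertModel"))  # ['TF', 'Bert', 'Model']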
304
1
'''simple docstring'''
from collections.abc import Callable

import numpy as np


def explicit_euler(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = f(x, y) with the forward (explicit) Euler method."""
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
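# A quick sanity check for explicit_euler above: for y' = y with y(0) = 1,
# forward Euler underestimates e; with step 0.01 it gives (1.01)**100 ~ 2.7048.
approx = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(approx[-1])  # ~ 2.7048, versus e ~ 2.7183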
304
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)

_UpperCamelCase : Optional[int] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
_UpperCamelCase : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class snake_case__ :
    a_ = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        } ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase)} ,
    )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,
    )


@dataclass
class snake_case__ :
    a_ = field(
        default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
    a_ = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        } ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} ,
    )
    a_ = field(
        default=UpperCamelCase ,
        metadata={"help": "Train with masked-language modeling loss instead of language modeling."})
    a_ = field(default=UpperCamelCase , metadata={"help": "Whether or not to use whole word mask."})
    a_ = field(
        default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"})
    a_ = field(
        default=1 / 6 ,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        } ,
    )
    a_ = field(
        default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."})
    a_ = field(
        default=-1 ,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        } ,
    )
    a_ = field(
        default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})


def __UpperCAmelCase ( A : DataTrainingArguments , A : PreTrainedTokenizer , A : bool = False , A : Optional[str] = None , ) -> List[Any]:
    def _dataset(A : Dict , A : str=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set whole word masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=A ,
                    file_path=A ,
                    block_size=args.block_size ,
                    ref_path=A ,
                )
            return LineByLineTextDataset(tokenizer=A , file_path=A , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=A ,
                file_path=A ,
                block_size=args.block_size ,
                overwrite_cache=args.overwrite_cache ,
                cache_dir=A ,
            )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(A ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )


def __UpperCAmelCase ( ) -> Optional[Any]:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    UpperCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            '''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
            '''or remove the --do_eval argument.''' )

    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ''' --overwrite_output_dir to overcome.''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa ,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , A )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        UpperCAmelCase_ : List[Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )

    if model_args.tokenizer_name:
        UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it, and load it from here, using --tokenizer_name''' )

    if model_args.model_name_or_path:
        UpperCAmelCase_ : str = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,
            config=A ,
            cache_dir=model_args.cache_dir ,
        )
    else:
        logger.info('''Training new model from scratch''' )
        UpperCAmelCase_ : int = AutoModelWithLMHead.from_config(A )

    model.resize_token_embeddings(len(A ) )

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
            '''--mlm flag (masked language modeling).''' )

    if data_args.block_size <= 0:
        UpperCAmelCase_ : List[str] = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        UpperCAmelCase_ : Dict = min(data_args.block_size , tokenizer.max_len )

    # Get datasets
    UpperCAmelCase_ : str = (
        get_dataset(A , tokenizer=A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    UpperCAmelCase_ : Any = (
        get_dataset(A , tokenizer=A , evaluate=A , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        UpperCAmelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
            tokenizer=A ,
            plm_probability=data_args.plm_probability ,
            max_span_length=data_args.max_span_length ,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            UpperCAmelCase_ : Tuple = DataCollatorForWholeWordMask(
                tokenizer=A , mlm_probability=data_args.mlm_probability )
        else:
            UpperCAmelCase_ : List[str] = DataCollatorForLanguageModeling(
                tokenizer=A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    UpperCAmelCase_ : Any = Trainer(
        model=A ,
        args=A ,
        data_collator=A ,
        train_dataset=A ,
        eval_dataset=A ,
        prediction_loss_only=A ,
    )

    # Training
    if training_args.do_train:
        UpperCAmelCase_ : List[str] = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=A )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    UpperCAmelCase_ : Tuple = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        UpperCAmelCase_ : Dict = trainer.evaluate()
        UpperCAmelCase_ : Union[str, Any] = math.exp(eval_output['''eval_loss'''] )
        UpperCAmelCase_ : Optional[int] = {'''perplexity''': perplexity}

        UpperCAmelCase_ : int = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(A , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , A , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )

        results.update(A )

    return results


def __UpperCAmelCase ( A : Tuple ) -> Tuple:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
304
1
'''simple docstring'''
def solution(power: int = 1_000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
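# Usage check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26 (the classic example for this problem).
assert solution(15) == 26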
304
'''simple docstring'''
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

_UpperCamelCase : Optional[int] = '0.12'  # assumed parallelism: 8


@require_flax
@is_staging_test
class snake_case__ ( unittest.TestCase):
    @classmethod
    def A ( cls : Optional[int] ) -> Tuple:
        UpperCAmelCase_ : List[str] = TOKEN
        HfFolder.save_token(_A )

    @classmethod
    def A ( cls : int ) -> Tuple:
        try:
            delete_repo(token=cls._token , repo_id='''test-model-flax''' )
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
        except HTTPError:
            pass

    def A ( self : Dict ) -> Optional[int]:
        UpperCAmelCase_ : List[Any] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase_ : List[str] = FlaxBertModel(_A )
        model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )

        UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )

        UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''test-model-flax''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token )

        UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )

        UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

    def A ( self : str ) -> Tuple:
        UpperCAmelCase_ : List[str] = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A )
        model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )

        UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )

        # Reset repo
        delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                _A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token )

        UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )

        UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) )
        UpperCAmelCase_ : Tuple = flatten_dict(unfreeze(new_model.params ) )

        for key in base_params.keys():
            UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" )


def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]:
    UpperCAmelCase_ : Optional[int] = True
    UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params )
    UpperCAmelCase_ : str = flatten_dict(modela.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
            UpperCAmelCase_ : int = False
    return models_are_equal


@require_flax
class snake_case__ ( unittest.TestCase):
    def A ( self : Any ) -> Any:
        UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase_ : Any = FlaxBertModel(_A )
        UpperCAmelCase_ : Tuple = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_A , _A ) )
            with self.assertRaises(_A ):
                UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A )
            UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )

        self.assertTrue(check_models_equal(_A , _A ) )

    def A ( self : int ) -> Tuple:
        UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        UpperCAmelCase_ : Tuple = FlaxBertModel(_A )
        UpperCAmelCase_ : str = '''bert'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' )
            with self.assertRaises(_A ):
                UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A )
            UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A )

        self.assertTrue(check_models_equal(_A , _A ) )

    def A ( self : int ) -> Optional[int]:
        UpperCAmelCase_ : int = '''bert'''
        UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder'''
        with self.assertRaises(_A ):
            UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A )
        UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertIsNotNone(_A )

    def A ( self : Any ) -> str:
        UpperCAmelCase_ : Optional[Any] = '''bert'''
        UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
        with self.assertRaises(_A ):
            UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A )
        UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A )
        self.assertIsNotNone(_A )
304
1
'''simple docstring'''
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into L (unit diagonal) and U."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
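# Usage sketch for the Doolittle factorisation above; the matrix is an
# illustrative assumption chosen so that no pivot is zero.
import numpy as np

matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower, upper = lower_upper_decomposition(matrix)
print(np.allclose(lower @ upper, matrix))  # True: L @ U reproduces the input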
304
'''simple docstring'''
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

_UpperCamelCase : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Dict = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
304
1
'''simple docstring'''
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree for distinct primes p, q (Project Euler 800)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f'''{solution() = }''')
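# A small check of the sieve helper above; solution() then two-pointer scans
# these primes for hybrid pairs under the default bound.
assert calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]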
304
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
    if isinstance(A , A ):
        UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
    else:
        UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )

    for i, tensor in enumerate(A ):
        if padding_side == "right":
            if isinstance(A , A ):
                UpperCAmelCase_ : Tuple = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : Dict = tensor[:sequence_length]
        else:
            if isinstance(A , A ):
                UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : int = tensor[:sequence_length]

    return out_tensor.tolist()


def __UpperCAmelCase ( A : List[Any] ) -> str:
    UpperCAmelCase_ : Dict = ord(A )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class snake_case__ ( UpperCamelCase):
    a_ = 42
    a_ = True
    a_ = None
    a_ = None
    a_ = -100
    a_ = "pt"

    def A ( self : List[Any] , _A : Dict ) -> Tuple:
        import torch

        UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        UpperCAmelCase_ : Tuple = self.tokenizer.pad(
            _A ,
            padding=self.padding ,
            max_length=self.max_length ,
            pad_to_multiple_of=self.pad_to_multiple_of ,
            # conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors='''pt''' if labels is None else None ,
        )

        if labels is None:
            return batch

        UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
        UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
        if padding_side == "right":
            UpperCAmelCase_ : Optional[Any] = [
                list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
            ]
        else:
            UpperCAmelCase_ : Any = [
                [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
            ]

        UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
        UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
        UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
        UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
        UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}

        return batch
304
1
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort in place by swapping every out-of-order pair (i, j) with j > i; O(n^2)."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
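# Usage sketch for the quadratic pairwise-swap sort above.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]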
304
'''simple docstring'''
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein edit distance via memoised top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
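# Usage sketch for the memoised Levenshtein distance above:
# "kitten" -> "sitting" takes two substitutions and one insertion.
assert min_distance_up_bottom("kitten", "sitting") == 3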
304
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_UpperCamelCase : Dict = {
    'configuration_x_clip': [
        'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'XCLIPConfig',
        'XCLIPTextConfig',
        'XCLIPVisionConfig',
    ],
    'processing_x_clip': ['XCLIPProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase : int = [
        'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XCLIPModel',
        'XCLIPPreTrainedModel',
        'XCLIPTextModel',
        'XCLIPVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )

else:
    import sys

    _UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
304
'''simple docstring'''
def solution(n: int = 1_000) -> int:
    """Count expansions of sqrt(2) whose numerator has more digits than the denominator (Project Euler 57)."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f'''{solution() = }''')
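# Quick check: among the first eight expansions only 1393/985 has a longer
# numerator (4 digits vs 3), so solution(8) == 1.
assert solution(8) == 1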
304
1
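The row above solves Project Euler 57: among the first n continued-fraction expansions of sqrt(2), count those whose numerator has more digits than the denominator. A readable sketch:

def solution(n: int = 1000) -> int:
    numerator, denominator = 1, 1
    count = 0
    for _ in range(n):
        # Next expansion of sqrt(2): p/q -> (p + 2q) / (p + q).
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
        if len(str(numerator)) > len(str(denominator)):
            count += 1
    return count

assert solution(8) == 1  # 1393/985 is the first expansion with a longer numerator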
'''simple docstring''' import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _UpperCamelCase : Optional[int] = '0.12' # assumed parallelism: 8 @require_flax @is_staging_test class snake_case__ ( unittest.TestCase): @classmethod def A ( cls : Optional[int] ) -> Tuple: UpperCAmelCase_ : List[str] = TOKEN HfFolder.save_token(_A ) @classmethod def A ( cls : int ) -> Tuple: try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def A ( self : Dict ) -> Optional[int]: UpperCAmelCase_ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : List[str] = FlaxBertModel(_A ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : int = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_A , repo_id='''test-model-flax''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : int = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def A ( self : str ) -> Tuple: UpperCAmelCase_ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) UpperCAmelCase_ : Optional[Any] = FlaxBertModel(_A ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Any = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _A , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=_A , use_auth_token=self._token ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) UpperCAmelCase_ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase_ : Tuple = 
flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase_ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_A , 1e-3 , msg=F"{key} not identical" ) def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = True UpperCAmelCase_ : Optional[int] = flatten_dict(modela.params ) UpperCAmelCase_ : str = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4: UpperCAmelCase_ : int = False return models_are_equal @require_flax class snake_case__ ( unittest.TestCase): def A ( self : Any ) -> Any: UpperCAmelCase_ : Any = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Any = FlaxBertModel(_A ) UpperCAmelCase_ : Tuple = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) ) with self.assertRaises(_A ): UpperCAmelCase_ : Optional[int] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Tuple: UpperCAmelCase_ : Dict = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) UpperCAmelCase_ : Tuple = FlaxBertModel(_A ) UpperCAmelCase_ : str = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_A , _A ) , max_shard_size='''10KB''' ) with self.assertRaises(_A ): UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertTrue(check_models_equal(_A , _A ) ) def A ( self : int ) -> Optional[int]: UpperCAmelCase_ : int = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A ) def A ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = '''bert''' UpperCAmelCase_ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(_A ): UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A ) UpperCAmelCase_ : List[Any] = FlaxBertModel.from_pretrained(_A , subfolder=_A ) self.assertIsNotNone(_A )
304
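The module-level helper in the rows above compares two Flax models parameter by parameter, but the renaming collapsed the two flattened dicts into one name. A readable sketch of the intended check (assumes both models share a parameter tree):

import numpy as np
from flax.traverse_util import flatten_dict

def check_models_equal(model_a, model_b) -> bool:
    # Models match when every flattened parameter array differs by at most 1e-4 in L1 norm.
    flat_a = flatten_dict(model_a.params)
    flat_b = flatten_dict(model_b.params)
    return all(np.sum(np.abs(flat_a[key] - flat_b[key])) <= 1e-4 for key in flat_a)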
'''simple docstring''' import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : int , _A : List[str] , _A : Dict=7 , _A : List[str]=3 , _A : List[str]=18 , _A : Dict=30 , _A : Union[str, Any]=4_00 , _A : List[str]=True , _A : List[str]=None , _A : int=True , _A : Tuple=None , _A : Union[str, Any]=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Union[str, Any]=[0.5, 0.5, 0.5] , _A : Tuple=False , ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = size if size is not None else {'''height''': 20, '''width''': 20} UpperCAmelCase_ : List[Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : Tuple = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Optional[int] = do_resize UpperCAmelCase_ : Tuple = size UpperCAmelCase_ : Optional[Any] = do_center_crop UpperCAmelCase_ : Optional[int] = crop_size UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Optional[Any] = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : List[Any] = do_reduce_labels def A ( self : Union[str, Any] ) -> str: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def __UpperCAmelCase ( ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : Optional[Any] = Image.open(dataset[0]['''file'''] ) UpperCAmelCase_ : str = Image.open(dataset[1]['''file'''] ) return image, map def __UpperCAmelCase ( ) -> Any: UpperCAmelCase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) UpperCAmelCase_ : int = Image.open(ds[0]['''file'''] ) UpperCAmelCase_ : Optional[Any] = Image.open(ds[1]['''file'''] ) UpperCAmelCase_ : Dict = Image.open(ds[2]['''file'''] ) UpperCAmelCase_ : List[str] = Image.open(ds[3]['''file'''] ) return [imagea, imagea], [mapa, mapa] @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = BeitImageProcessor if is_vision_available() else None def A ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = BeitImageProcessingTester(self ) @property def A ( self : List[Any] ) -> Tuple: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''center_crop''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> Optional[int]: 
UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) self.assertEqual(image_processor.do_reduce_labels , _A ) UpperCAmelCase_ : Union[str, Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=_A ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) self.assertEqual(image_processor.do_reduce_labels , _A ) def A ( self : Optional[Any] ) -> Any: pass def A ( self : List[str] ) -> Optional[int]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Union[str, Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Optional[int] ) -> str: # Initialize image_processing UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched UpperCAmelCase_ : int = image_processing(_A , 
return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) UpperCAmelCase_ : Union[str, Any] = [] for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input UpperCAmelCase_ : str = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test not batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : List[str] = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 1, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) # Test batched input (PIL images) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_semantic_batch_inputs() UpperCAmelCase_ : int = image_processing(_A , _A , return_tensors='''pt''' ) self.assertEqual( encoding['''pixel_values'''].shape , ( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual( encoding['''labels'''].shape , ( 2, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) self.assertEqual(encoding['''labels'''].dtype , torch.long ) 
self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 ) def A ( self : List[Any] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 UpperCAmelCase_ , UpperCAmelCase_ : Any = prepare_semantic_single_inputs() UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 1_50 ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = image_processing(_A , _A , return_tensors='''pt''' ) self.assertTrue(encoding['''labels'''].min().item() >= 0 ) self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
304
1
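The last test in the rows above flips do_reduce_labels on and expects the label ceiling to move from 150 to 255. That follows the usual ADE20k convention; a sketch of the mapping (assumed to match the processor's behavior, not copied from it):

import numpy as np

def reduce_label(label: np.ndarray) -> np.ndarray:
    # Background class 0 becomes the ignore index 255; real classes shift down by one.
    label = label.astype(np.int64)
    label[label == 0] = 255
    label = label - 1
    label[label == 254] = 255
    return label

assert reduce_label(np.array([0, 1, 150])).tolist() == [255, 0, 149]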
'''simple docstring''' import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def __UpperCAmelCase ( A : List[Any] , A : Dict , A : int ) -> Tuple: # Initialise PyTorch model UpperCAmelCase_ : Optional[int] = AlbertConfig.from_json_file(A ) print(F"Building PyTorch model from configuration: {config}" ) UpperCAmelCase_ : Optional[int] = AlbertForPreTraining(A ) # Load weights from tf checkpoint load_tf_weights_in_albert(A , A , A ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , A ) if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--albert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained ALBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _UpperCamelCase : List[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
304
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class snake_case__ ( enum.Enum): a_ = 0 a_ = 1 a_ = 2 @add_end_docstrings(UpperCamelCase) class snake_case__ ( UpperCamelCase): a_ = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : List[str] , *_A : Dict , **_A : int ) -> Optional[int]: super().__init__(*_A , **_A ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. UpperCAmelCase_ : Dict = None if self.model.config.prefix is not None: UpperCAmelCase_ : Tuple = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. UpperCAmelCase_ : Optional[Any] = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._sanitize_parameters(prefix=_A , **self._forward_params ) UpperCAmelCase_ : int = {**self._preprocess_params, **preprocess_params} UpperCAmelCase_ : List[str] = {**self._forward_params, **forward_params} def A ( self : Union[str, Any] , _A : int=None , _A : str=None , _A : Union[str, Any]=None , _A : List[Any]=None , _A : List[Any]=None , _A : int=None , _A : Optional[int]=None , _A : List[Any]=None , **_A : List[Any] , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = {} if prefix is not None: UpperCAmelCase_ : List[Any] = prefix if prefix: UpperCAmelCase_ : Tuple = self.tokenizer( _A , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : List[Any] = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ''' [None, \'hole\']''' ) UpperCAmelCase_ : Union[str, Any] = handle_long_generation preprocess_params.update(_A ) UpperCAmelCase_ : Optional[int] = generate_kwargs UpperCAmelCase_ : Tuple = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : int = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) UpperCAmelCase_ : List[Any] = ReturnType.TENSORS if return_type is not None: UpperCAmelCase_ : List[Any] = return_type if clean_up_tokenization_spaces is not None: UpperCAmelCase_ : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: UpperCAmelCase_ : Any = self.tokenizer.encode(_A , add_special_tokens=_A ) if len(_A ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) UpperCAmelCase_ : str = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def A ( self : Dict , *_A : Optional[Any] , **_A : Any ) -> Any: # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_A , **_A ) def __call__( self : List[Any] , _A : Union[str, Any] , **_A : List[str] ) -> Dict: return super().__call__(_A , **_A ) def A ( self : List[Any] , _A : List[Any] , _A : Any="" , _A : Dict=None , **_A : Dict ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.tokenizer( prefix + prompt_text , padding=_A , add_special_tokens=_A , return_tensors=self.framework ) UpperCAmelCase_ : str = prompt_text if handle_long_generation == "hole": UpperCAmelCase_ : List[str] = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: UpperCAmelCase_ : Optional[int] = generate_kwargs['''max_new_tokens'''] else: UpperCAmelCase_ : Union[str, Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: UpperCAmelCase_ : Dict = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) UpperCAmelCase_ : List[str] = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: UpperCAmelCase_ : Optional[int] = inputs['''attention_mask'''][:, -keep_length:] return inputs def A ( self : List[str] , _A : Optional[Any] , **_A : str ) -> Optional[int]: UpperCAmelCase_ : Any = model_inputs['''input_ids'''] UpperCAmelCase_ : Dict = model_inputs.get('''attention_mask''' , _A ) # Allow empty prompts if input_ids.shape[1] == 0: UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = 1 else: UpperCAmelCase_ : Optional[int] = input_ids.shape[0] UpperCAmelCase_ : Dict = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
UpperCAmelCase_ : List[str] = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: UpperCAmelCase_ : str = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: UpperCAmelCase_ : Any = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length UpperCAmelCase_ : Optional[Any] = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL UpperCAmelCase_ : Union[str, Any] = self.model.generate(input_ids=_A , attention_mask=_A , **_A ) UpperCAmelCase_ : Any = generated_sequence.shape[0] if self.framework == "pt": UpperCAmelCase_ : List[str] = generated_sequence.reshape(_A , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": UpperCAmelCase_ : int = tf.reshape(_A , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def A ( self : int , _A : List[Any] , _A : Dict=ReturnType.FULL_TEXT , _A : Dict=True ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = model_outputs['''generated_sequence'''][0] UpperCAmelCase_ : int = model_outputs['''input_ids'''] UpperCAmelCase_ : str = model_outputs['''prompt_text'''] UpperCAmelCase_ : Any = generated_sequence.numpy().tolist() UpperCAmelCase_ : int = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: UpperCAmelCase_ : Optional[Any] = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text UpperCAmelCase_ : Any = self.tokenizer.decode( _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: UpperCAmelCase_ : List[str] = 0 else: UpperCAmelCase_ : str = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , ) ) if return_type == ReturnType.FULL_TEXT: UpperCAmelCase_ : Dict = prompt_text + text[prompt_length:] else: UpperCAmelCase_ : Dict = text[prompt_length:] UpperCAmelCase_ : List[str] = {'''generated_text''': all_text} records.append(_A ) return records
304
1
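One detail worth isolating from the text-generation pipeline rows above is the "hole" strategy for overlong prompts: keep only the trailing slice of the prompt so the requested new tokens still fit under the model's maximum length. A sketch with assumed names:

def apply_hole(input_ids: list[int], new_tokens: int, model_max_length: int) -> list[int]:
    if len(input_ids) + new_tokens > model_max_length:
        keep_length = model_max_length - new_tokens
        if keep_length <= 0:
            raise ValueError("the requested new tokens alone exceed the model's max length")
        input_ids = input_ids[-keep_length:]  # drop the oldest tokens, keep the tail
    return input_ids

assert apply_hole(list(range(10)), 4, 8) == [6, 7, 8, 9]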
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class snake_case__ ( unittest.TestCase): def __init__( self : List[str] , _A : Any , _A : Any=7 , _A : Optional[int]=3 , _A : str=18 , _A : Optional[Any]=30 , _A : int=4_00 , _A : int=True , _A : str=None , _A : int=True , _A : Optional[int]=False , _A : int=True , _A : Tuple=True , _A : Optional[Any]=[0.5, 0.5, 0.5] , _A : List[str]=[0.5, 0.5, 0.5] , ) -> str: UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Dict = image_size UpperCAmelCase_ : Any = min_resolution UpperCAmelCase_ : List[str] = max_resolution UpperCAmelCase_ : List[str] = do_resize UpperCAmelCase_ : Dict = size if size is not None else {'''height''': 18, '''width''': 20} UpperCAmelCase_ : Tuple = do_thumbnail UpperCAmelCase_ : List[str] = do_align_axis UpperCAmelCase_ : Any = do_pad UpperCAmelCase_ : int = do_normalize UpperCAmelCase_ : Any = image_mean UpperCAmelCase_ : List[str] = image_std def A ( self : Optional[int] ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class snake_case__ ( UpperCamelCase , unittest.TestCase): a_ = DonutImageProcessor if is_vision_available() else None def A ( self : List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = DonutImageProcessingTester(self ) @property def A ( self : Optional[int] ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def A ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_thumbnail''' ) ) self.assertTrue(hasattr(_A , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) self.assertTrue(hasattr(_A , '''image_std''' ) ) def A ( self : List[str] ) -> List[str]: UpperCAmelCase_ : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) UpperCAmelCase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order UpperCAmelCase_ : int = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def A ( self : Union[str, Any] ) -> Optional[int]: pass @is_flaky() def A ( self : Any ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ : Tuple = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def A ( self : Optional[int] ) -> Optional[Any]: # Initialize image_processing UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def A ( self : List[str] ) -> str: # Initialize image_processing UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
304
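The Donut test above relies on legacy (width, height) size tuples being normalized into a {"height", "width"} dict. A sketch of that normalization (assumed logic, not the library's exact helper):

def normalize_size(size):
    if isinstance(size, (tuple, list)):
        width, height = size  # legacy order: (width, height)
        return {"height": height, "width": width}
    return size

assert normalize_size((42, 84)) == {"height": 84, "width": 42}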
'''simple docstring''' from __future__ import annotations import math def __UpperCAmelCase ( A : int , A : int , A : bool , A : list[int] , A : float ) -> int: if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , A , A , A ) , minimax(depth + 1 , node_index * 2 + 1 , A , A , A ) , ) ) def __UpperCAmelCase ( ) -> None: UpperCAmelCase_ : List[str] = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3] UpperCAmelCase_ : List[Any] = math.log(len(A ) , 2 ) print(F"Optimal value : {minimax(0 , 0 , A , A , A )}" ) if __name__ == "__main__": import doctest doctest.testmod() main()
304
1
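The row above is minimax over a perfect binary tree whose leaves hold the scores. A readable sketch with the maximizing/minimizing turns alternating explicitly:

import math

def minimax(depth: int, node: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth == height:  # reached a leaf
        return scores[node]
    left = minimax(depth + 1, node * 2, not is_max, scores, height)
    right = minimax(depth + 1, node * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
assert minimax(0, 0, True, scores, math.log2(len(scores))) == 65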
'''simple docstring''' from datasets.utils.patching import _PatchedModuleObj, patch_submodule from . import _test_patching def __UpperCAmelCase ( ) -> List[Any]: import os as original_os from os import path as original_path from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join UpperCAmelCase_ : Optional[int] = '''__test_patch_submodule_mock__''' with patch_submodule(_test_patching , '''os.path.join''' , A ): # Every way to access os.path.join must be patched, and the rest must stay untouched # check os.path.join assert isinstance(_test_patching.os , _PatchedModuleObj ) assert isinstance(_test_patching.os.path , _PatchedModuleObj ) assert _test_patching.os.path.join is mock # check path.join assert isinstance(_test_patching.path , _PatchedModuleObj ) assert _test_patching.path.join is mock # check join assert _test_patching.join is mock # check that the other attributes are untouched assert _test_patching.os.rename is original_rename assert _test_patching.path.dirname is original_dirname assert _test_patching.os.path.dirname is original_dirname # Even renamed modules or objects must be patched # check renamed_os.path.join assert isinstance(_test_patching.renamed_os , _PatchedModuleObj ) assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj ) assert _test_patching.renamed_os.path.join is mock # check renamed_path.join assert isinstance(_test_patching.renamed_path , _PatchedModuleObj ) assert _test_patching.renamed_path.join is mock # check renamed_join assert _test_patching.renamed_join is mock # check that the other attributes are untouched assert _test_patching.renamed_os.rename is original_rename assert _test_patching.renamed_path.dirname is original_dirname assert _test_patching.renamed_os.path.dirname is original_dirname # check that everthing is back to normal when the patch is over assert _test_patching.os is original_os assert _test_patching.path is original_path assert _test_patching.join is original_join assert _test_patching.renamed_os is original_os assert _test_patching.renamed_path is original_path assert _test_patching.renamed_join is original_join def __UpperCAmelCase ( ) -> List[Any]: assert _test_patching.open is open UpperCAmelCase_ : str = '''__test_patch_submodule_builtin_mock__''' # _test_patching has "open" in its globals assert _test_patching.open is open with patch_submodule(_test_patching , '''open''' , A ): assert _test_patching.open is mock # check that everthing is back to normal when the patch is over assert _test_patching.open is open def __UpperCAmelCase ( ) -> List[Any]: # pandas.read_csv is not present in _test_patching UpperCAmelCase_ : Union[str, Any] = '''__test_patch_submodule_missing_mock__''' with patch_submodule(_test_patching , '''pandas.read_csv''' , A ): pass def __UpperCAmelCase ( ) -> List[Any]: # builtin should always be mocked even if they're not in the globals # in case they're loaded at one point UpperCAmelCase_ : List[str] = '''__test_patch_submodule_missing_builtin_mock__''' # _test_patching doesn't have "len" in its globals assert getattr(_test_patching , '''len''' , A ) is None with patch_submodule(_test_patching , '''len''' , A ): assert _test_patching.len is mock assert 
_test_patching.len is len def __UpperCAmelCase ( ) -> str: UpperCAmelCase_ : List[str] = '''__test_patch_submodule_start_and_stop_mock__''' UpperCAmelCase_ : Dict = patch_submodule(_test_patching , '''open''' , A ) assert _test_patching.open is open patch.start() assert _test_patching.open is mock patch.stop() assert _test_patching.open is open def __UpperCAmelCase ( ) -> Union[str, Any]: from os import rename as original_rename from os.path import dirname as original_dirname from os.path import join as original_join UpperCAmelCase_ : List[str] = '''__test_patch_submodule_successive_join__''' UpperCAmelCase_ : Dict = '''__test_patch_submodule_successive_dirname__''' UpperCAmelCase_ : int = '''__test_patch_submodule_successive_rename__''' assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename with patch_submodule(_test_patching , '''os.path.join''' , A ): with patch_submodule(_test_patching , '''os.rename''' , A ): with patch_submodule(_test_patching , '''os.path.dirname''' , A ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename # try another order with patch_submodule(_test_patching , '''os.rename''' , A ): with patch_submodule(_test_patching , '''os.path.join''' , A ): with patch_submodule(_test_patching , '''os.path.dirname''' , A ): assert _test_patching.os.path.join is mock_join assert _test_patching.os.path.dirname is mock_dirname assert _test_patching.os.rename is mock_rename assert _test_patching.os.path.join is original_join assert _test_patching.os.path.dirname is original_dirname assert _test_patching.os.rename is original_rename def __UpperCAmelCase ( ) -> Tuple: UpperCAmelCase_ : Tuple = '''__test_patch_submodule_doesnt_exist_mock__''' with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A ): pass with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A ): pass
304
'''simple docstring''' from __future__ import annotations def __UpperCAmelCase ( A : list , A : int , A : int , A : int ) -> list: UpperCAmelCase_ : Any = [] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCAmelCase_ : List[Any] = result + left + right return input_list def __UpperCAmelCase ( A : list ) -> list: if len(A ) <= 1: return input_list UpperCAmelCase_ : List[str] = list(A ) # iteration for two-way merging UpperCAmelCase_ : Tuple = 2 while p <= len(A ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(A ) , A ): UpperCAmelCase_ : Union[str, Any] = i UpperCAmelCase_ : int = i + p - 1 UpperCAmelCase_ : Any = (low + high + 1) // 2 UpperCAmelCase_ : Union[str, Any] = merge(A , A , A , A ) # final merge of last two parts if p * 2 >= len(A ): UpperCAmelCase_ : str = i UpperCAmelCase_ : Tuple = merge(A , 0 , A , len(A ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip() if user_input == "": _UpperCamelCase : List[str] = [] else: _UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')] print(iter_merge_sort(unsorted))
304
1
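The row above is a bottom-up (iterative) merge sort. A readable sketch of the same idea with explicit run widths:

def merge(items: list[int], low: int, mid: int, high: int) -> None:
    # Merge the sorted runs items[low:mid+1] and items[mid+1:high+1] in place.
    left, right = items[low : mid + 1], items[mid + 1 : high + 1]
    merged = []
    while left and right:
        merged.append((left if left[0] <= right[0] else right).pop(0))
    items[low : high + 1] = merged + left + right

def iter_merge_sort(items: list[int]) -> list[int]:
    items = list(items)
    width = 1
    while width < len(items):
        for low in range(0, len(items), 2 * width):
            mid = min(low + width - 1, len(items) - 1)
            high = min(low + 2 * width - 1, len(items) - 1)
            merge(items, low, mid, high)
        width *= 2
    return items

assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]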
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class snake_case__ ( unittest.TestCase): @slow def A ( self : str ) -> Any: UpperCAmelCase_ : Any = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_A ).to(_A ) UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''google/mt5-small''' ) UpperCAmelCase_ : Tuple = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids UpperCAmelCase_ : Union[str, Any] = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids UpperCAmelCase_ : str = model(input_ids.to(_A ) , labels=labels.to(_A ) ).loss UpperCAmelCase_ : Dict = -(labels.shape[-1] * loss.item()) UpperCAmelCase_ : Any = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
304
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class snake_case__ : a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 # [batch_size x 3] a_ = 42 a_ = 42 a_ = 42 a_ = 42 a_ = 42 def A ( self : Tuple ) -> Optional[int]: assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def A ( self : List[Any] ) -> Union[str, Any]: return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def A ( self : Any ) -> Optional[Any]: return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def A ( self : Optional[int] ) -> torch.Tensor: UpperCAmelCase_ : Dict = torch.arange(self.height * self.width ) UpperCAmelCase_ : int = torch.stack( [ pixel_indices % self.width, torch.div(_A , self.width , rounding_mode='''trunc''' ), ] , axis=1 , ) return coords @property def A ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ , *UpperCAmelCase_ : Union[str, Any] = self.shape UpperCAmelCase_ : Optional[Any] = int(np.prod(_A ) ) UpperCAmelCase_ : Any = self.get_image_coords() UpperCAmelCase_ : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Union[str, Any] = self.get_camera_rays(_A ) UpperCAmelCase_ : str = rays.view(_A , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def A ( self : Optional[int] , _A : torch.Tensor ) -> torch.Tensor: UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Dict = coords.view(_A , -1 , 2 ) UpperCAmelCase_ : Union[str, Any] = self.resolution() UpperCAmelCase_ : int = self.fov() UpperCAmelCase_ : Dict = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Optional[int] = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : Any = fracs.view(_A , -1 , 2 ) UpperCAmelCase_ : List[Any] = ( self.z.view(_A , 1 , 3 ) + self.x.view(_A , 1 , 3 ) * fracs[:, :, :1] + self.y.view(_A , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=_A ) UpperCAmelCase_ : Union[str, Any] = torch.stack( [ torch.broadcast_to(self.origin.view(_A , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(_A , *_A , 2 , 3 ) def A ( self : Tuple , _A : int , _A : int ) -> "DifferentiableProjectiveCamera": assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=_A , height=_A , x_fov=self.x_fov , y_fov=self.y_fov , ) def __UpperCAmelCase ( A : int ) -> DifferentiableProjectiveCamera: UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : str = [] for theta in np.linspace(0 , 2 * np.pi , num=2_0 ): UpperCAmelCase_ : str = np.array([np.sin(A ), np.cos(A ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : Optional[int] = -z * 4 UpperCAmelCase_ : Optional[int] = np.array([np.cos(A ), -np.sin(A ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(A , A ) origins.append(A ) xs.append(A ) ys.append(A ) zs.append(A ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(A , axis=0 ) ).float() , width=A , height=A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(A )) , )
304
1
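The ray construction in the camera rows above reduces, per pixel, to: map the pixel coordinate to a [-1, 1] fraction, scale by tan(fov/2), then mix the camera's x/y axes into its viewing axis z. A sketch of one ray (names assumed):

import numpy as np

def camera_ray(px, py, width, height, x_fov, y_fov, origin, x_axis, y_axis, z_axis):
    fx = (px / (width - 1)) * 2 - 1   # pixel -> [-1, 1]
    fy = (py / (height - 1)) * 2 - 1
    fx *= np.tan(x_fov / 2)
    fy *= np.tan(y_fov / 2)
    direction = z_axis + fx * x_axis + fy * y_axis
    return origin, direction / np.linalg.norm(direction)

# The center pixel looks straight down the viewing axis.
o, d = camera_ray(9.5, 9.5, 20, 20, 0.7, 0.7, np.zeros(3), np.eye(3)[0], np.eye(3)[1], np.eye(3)[2])
assert np.allclose(d, [0.0, 0.0, 1.0])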
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging _UpperCamelCase : Optional[int] = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class snake_case__ ( UpperCamelCase): a_ = "blenderbot-small" a_ = ["past_key_values"] a_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Any , _A : Union[str, Any]=5_02_65 , _A : Union[str, Any]=5_12 , _A : Any=8 , _A : Tuple=20_48 , _A : Tuple=16 , _A : Optional[Any]=8 , _A : Any=20_48 , _A : str=16 , _A : List[Any]=0.0 , _A : str=0.0 , _A : int=True , _A : Optional[Any]=True , _A : Optional[int]="gelu" , _A : List[str]=5_12 , _A : Tuple=0.1 , _A : Optional[int]=0.0 , _A : str=0.0 , _A : Any=0.02 , _A : Tuple=1 , _A : Optional[int]=False , _A : Optional[Any]=0 , _A : Dict=1 , _A : List[Any]=2 , _A : Tuple=2 , **_A : Optional[int] , ) -> Optional[int]: UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[int] = max_position_embeddings UpperCAmelCase_ : int = d_model UpperCAmelCase_ : str = encoder_ffn_dim UpperCAmelCase_ : Dict = encoder_layers UpperCAmelCase_ : Any = encoder_attention_heads UpperCAmelCase_ : List[Any] = decoder_ffn_dim UpperCAmelCase_ : List[Any] = decoder_layers UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads UpperCAmelCase_ : str = dropout UpperCAmelCase_ : Optional[Any] = attention_dropout UpperCAmelCase_ : Union[str, Any] = activation_dropout UpperCAmelCase_ : Dict = activation_function UpperCAmelCase_ : Tuple = init_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Dict = decoder_layerdrop UpperCAmelCase_ : Optional[Any] = use_cache UpperCAmelCase_ : Dict = encoder_layers UpperCAmelCase_ : str = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , ) class snake_case__ ( UpperCamelCase): @property def A ( self : Dict ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: UpperCAmelCase_ : Dict = {0: '''batch'''} UpperCAmelCase_ : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: UpperCAmelCase_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''} UpperCAmelCase_ : List[str] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(_A , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. 
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
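# ---------------------------------------------------------------------------
# Usage sketch (added illustration, not part of the original file). The methods
# above follow the seq2seq ONNX config pattern used by BART-style models in
# transformers; the concrete config class and checkpoint name below are
# assumptions chosen only to make the example concrete.
#
#   from transformers import AutoTokenizer, BartConfig
#   from transformers.models.bart.configuration_bart import BartOnnxConfig
#   from transformers.utils import TensorType
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(BartConfig(), task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   print(sorted(dummy.keys()))
#   # -> ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']
# ---------------------------------------------------------------------------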
304
'''simple docstring'''

import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        # Encode each character with a fresh random key: c = (ord(ch) + k) * k
        plain = [ord(i) for i in text]
        key = []
        cipher = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        # Invert encrypt(): ord(ch) = (c - k**2) / k
        plain = []
        for i in range(len(cipher)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
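# Added illustration (not in the original file): a round-trip self-check.
# decrypt() inverts encrypt() exactly, since (c - k**2) / k == ((i + k) * k - k**2) / k == i.
def _demo_roundtrip(message: str = "attack at dawn") -> None:
    cipher, key = Onepad.encrypt(message)
    assert Onepad.decrypt(cipher, key) == message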
304
1
'''simple docstring'''


def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
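# Added illustration (not in the original file): the contract in small examples.
def _is_isogram_examples() -> None:
    assert is_isogram("Uncopyrightable") is True  # 15 distinct letters
    assert is_isogram("allowance") is False  # "a" and "l" repeat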
304
'''simple docstring'''

import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
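# Added note (not in the original file): in a transformers checkout this suite is
# normally run with pytest, e.g. (the path assumes the usual repo layout):
#   python -m pytest tests/models/reformer/test_tokenization_reformer.py -k test_full_tokenizer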
304
1
'''simple docstring'''

import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
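# Added illustration (not in the original file): an example invocation. The
# checkpoint filename is an assumption based on the upstream consistency-model
# release naming; any supported "cd"/"ct" + "imagenet64"/LSUN name triggers the
# matching config branch above.
#   python convert_consistency_to_diffusers.py \
#       --unet_path /path/to/cd_imagenet64_l2.pt \
#       --dump_path ./cd_imagenet64_l2 \
#       --class_cond True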
304
'''simple docstring'''

from __future__ import annotations


def encode(plain: str) -> list[int]:
    # Map each lowercase letter to its position in the alphabet (a=1 .. z=26)
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    # Inverse of encode()
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
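# Added illustration (not in the original file): encode/decode are inverses on
# lowercase alphabetic input.
def _codec_examples() -> None:
    assert encode("marvin") == [13, 1, 18, 22, 9, 14]
    assert decode([13, 1, 18, 22, 9, 14]) == "marvin"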
304
1