Dataset schema (column, dtype, observed value range):

  code                     string   87 – 55.2k chars
  code_codestyle           int64    0 – 349
  style_context            string   135 – 49.1k chars
  style_context_codestyle  int64    0 – 349
  label                    int64    0 – 1
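The rows below follow this schema. As a minimal sketch of how rows with this layout might be consumed (assuming they are published as a Hugging Face dataset; the id "org/code-style-pairs" is a placeholder, not the real dataset name):

# Minimal sketch, not the canonical loader. Assumes the rows live in a
# Hugging Face dataset; "org/code-style-pairs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")
row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # obfuscated source + its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # context sample + its style id
print(row["label"])                                               # 0/1 pairing label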
"""Build and simulate a quantum half adder (sum = XOR of inputs, carry = AND of inputs)."""
# Note: uses the legacy qiskit.Aer / qiskit.execute API.
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
code_codestyle: 1
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
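For orientation, a hedged usage sketch of the thin wrapper classes this module exposes (assumes a recent transformers with TensorFlow installed; "google/mt5-small" is a well-known public checkpoint, not taken from this row, and raw mT5 is pretrained-only, so the generated text is illustrative rather than meaningful):

# Hedged sketch: load an mT5 checkpoint through the TF wrapper classes above.
from transformers import AutoTokenizer, TFMT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = TFMT5ForConditionalGeneration.from_pretrained("google/mt5-small")

inputs = tokenizer("UN offices in Geneva", return_tensors="tf")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))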
style_context_codestyle: 272
label: 0
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging

logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
code_codestyle: 2
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig

ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
style_context_codestyle: 272
label: 0
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
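A quick arithmetic sanity check for the restored helper (not part of the original row): 4150 is a fixed point of the digit-fifth-power map, so `solution()` must count it.

# Sanity check: 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150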
code_codestyle: 3
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __lowercase = logging.getLogger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Any = '''sequence-classification''' def __init__( self , __lowerCAmelCase): """simple docstring""" if type(__lowerCAmelCase) == dict: lowerCAmelCase = Namespace(**__lowerCAmelCase) lowerCAmelCase = glue_output_modes[hparams.task] lowerCAmelCase = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return self.model(**__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase = outputs[0] lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""] lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def a_ ( self): """simple docstring""" lowerCAmelCase = self.hparams lowerCAmelCase = processors[args.task]() lowerCAmelCase = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase = self._feature_file(__lowerCAmelCase) if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir) lowerCAmelCase = ( processor.get_dev_examples(args.data_dir) if mode == """dev""" else processor.get_train_examples(args.data_dir) ) lowerCAmelCase = convert_examples_to_features( __lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __lowerCAmelCase) torch.save(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False): """simple docstring""" lowerCAmelCase = """dev""" if mode == """test""" else mode lowerCAmelCase = self._feature_file(__lowerCAmelCase) logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) lowerCAmelCase = torch.load(__lowerCAmelCase) lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(__lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , ) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = outputs[:2] lowerCAmelCase = logits.detach().cpu().numpy() lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item() lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = np.squeeze(__lowerCAmelCase) lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0) lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)} lowerCAmelCase = dict(results.items()) lowerCAmelCase = results return ret, preds_list, out_label_list def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def a_ ( __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""") return parser def snake_case__ ( ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase = argparse.ArgumentParser() add_generic_args(_A , os.getcwd() ) lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() ) lowerCAmelCase = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase = os.path.join( """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) lowerCAmelCase = GLUETransformer(_A ) lowerCAmelCase = generic_train(_A , _A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) ) lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_A ) if __name__ == "__main__": main()
style_context_codestyle: 272
label: 0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
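A hedged usage sketch of this tool, under the assumption that `PipelineTool` instances are invoked as callables with the keyword names from `encode`; the image path is a placeholder:

# Hedged sketch: treat the tool as a callable; "invoice.png" is a placeholder path.
from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")  # placeholder document image
answer = tool(document=document, question="What is the invoice total?")
print(answer)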
code_codestyle: 4
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor

logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
style_context_codestyle: 272
label: 0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
code_codestyle: 5
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 272
label: 0
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch A : Dict = logging.get_logger(__name__) class __A( a ): snake_case_ = ['''pixel_values'''] def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BILINEAR , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 255 , _snake_case = True , _snake_case = None , _snake_case = None , **_snake_case , ) -> None: '''simple docstring''' super().__init__(**_snake_case ) __a = size if size is not None else {'''shortest_edge''': 256} __a = get_size_dict(_snake_case , default_to_square=_snake_case ) __a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __a = get_size_dict(_snake_case , param_name='''crop_size''' ) __a = do_resize __a = size __a = resample __a = do_center_crop __a = crop_size __a = do_rescale __a = rescale_factor __a = do_normalize __a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __a = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ) -> np.ndarray: '''simple docstring''' __a = get_size_dict(_snake_case , default_to_square=_snake_case ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __a = get_resize_output_image_size(_snake_case , size=size['''shortest_edge'''] , default_to_square=_snake_case ) return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ) -> np.ndarray: '''simple docstring''' __a = get_size_dict(_snake_case ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}""" ) return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case ) -> np.ndarray: '''simple docstring''' return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ) -> np.ndarray: '''simple docstring''' return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ) -> Optional[int]: '''simple docstring''' __a = do_resize if do_resize is not None else self.do_resize __a = size if size is not None else self.size __a = get_size_dict(_snake_case , default_to_square=_snake_case ) __a = resample if resample is not None else self.resample __a = do_center_crop if do_center_crop is not None else self.do_center_crop __a = crop_size if crop_size is not None else self.crop_size __a = get_size_dict(_snake_case , param_name='''crop_size''' ) __a = do_rescale if do_rescale is not None else self.do_rescale __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = do_normalize if do_normalize is not None else self.do_normalize __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = make_list_of_images(_snake_case ) if not valid_images(_snake_case ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__a = [to_numpy_array(_snake_case ) for image in images] if do_resize: __a = [self.resize(image=_snake_case , size=_snake_case , resample=_snake_case ) for image in images] if do_center_crop: __a = [self.center_crop(image=_snake_case , size=_snake_case ) for image in images] if do_rescale: __a = [self.rescale(image=_snake_case , scale=_snake_case ) for image in images] if do_normalize: __a = [self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case ) for image in images] __a = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images] __a = {'''pixel_values''': images} return BatchFeature(data=_snake_case , tensor_type=_snake_case ) def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Optional[int]: '''simple docstring''' __a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_snake_case ) != len(_snake_case ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_snake_case ): __a = target_sizes.numpy() __a = [] for idx in range(len(_snake_case ) ): __a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_snake_case ) __a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_snake_case ) else: __a = logits.argmax(dim=1 ) __a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
code_codestyle: 6
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class a__( unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = (3, 32, 128) lowerCAmelCase = tempfile.mkdtemp() # fmt: off lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") lowerCAmelCase = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self): """simple docstring""" shutil.rmtree(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1)) return image_input def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") 
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0) lowerCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""") lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = processor(text=__lowerCAmelCase) lowerCAmelCase = tokenizer(__lowerCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase): processor() def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.char_decode(__lowerCAmelCase) lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase) lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = None lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = torch.randn(1 , 27 , 38) lowerCAmelCase = torch.randn(1 , 27 , 50257) lowerCAmelCase = torch.randn(1 , 27 , 30522) lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input]) 
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
style_context_codestyle: 272
label: 0
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A ( nn.Module ): """simple docstring""" def __init__( self : List[str] )-> Union[str, Any]: '''simple docstring''' super().__init__() A__ = nn.Linear(3,4 ) A__ = nn.BatchNormad(4 ) A__ = nn.Linear(4,5 ) def snake_case__ ( self : Optional[int],lowercase_ : List[Any] )-> Any: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(lowercase_ ) ) ) class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : List[Any],lowercase_ : List[str],*lowercase_ : Optional[int],**lowercase_ : Tuple )-> Any: '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class A ( _UpperCAmelCase ): """simple docstring""" def snake_case__ ( self : List[str],lowercase_ : int,lowercase_ : Optional[Any] )-> List[str]: '''simple docstring''' return output + 1 class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : Union[str, Any] )-> Tuple: '''simple docstring''' A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(lowercase_,lowercase_ ) self.assertEqual(test_model._hf_hook,lowercase_ ) self.assertTrue(hasattr(lowercase_,'_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__,'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),['x'] ) remove_hook_from_module(lowercase_ ) self.assertFalse(hasattr(lowercase_,'_hf_hook' ) ) self.assertFalse(hasattr(lowercase_,'_old_forward' ) ) def snake_case__ ( self : Optional[Any] )-> str: '''simple docstring''' A__ = ModelForTest() A__ = ModelHook() add_hook_to_module(lowercase_,lowercase_ ) add_hook_to_module(lowercase_,lowercase_,append=lowercase_ ) self.assertEqual(isinstance(test_model._hf_hook,lowercase_ ),lowercase_ ) self.assertEqual(len(test_model._hf_hook.hooks ),2 ) self.assertTrue(hasattr(lowercase_,'_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__,'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ),['x'] ) remove_hook_from_module(lowercase_ ) self.assertFalse(hasattr(lowercase_,'_hf_hook' ) ) self.assertFalse(hasattr(lowercase_,'_old_forward' ) ) def snake_case__ ( self : str )-> Any: '''simple docstring''' A__ = ModelForTest() A__ = torch.randn(2,3 ) A__ = test_model(x + 1 ) A__ = test_model(x + 2 ) A__ = PreForwardHook() add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PreForwardHook() add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) self.assertTrue(torch.allclose(lowercase_,lowercase_,atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PreForwardHook(),PreForwardHook() ) add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) assert torch.allclose(lowercase_,lowercase_,atol=1E-5 ) def snake_case__ ( self : Optional[Any] )-> Optional[Any]: '''simple docstring''' A__ = ModelForTest() A__ = torch.randn(2,3 ) A__ = test_model(lowercase_ ) A__ = PostForwardHook() add_hook_to_module(lowercase_,lowercase_ ) A__ = 
test_model(lowercase_ ) self.assertTrue(torch.allclose(lowercase_,output + 1,atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A__ = PostForwardHook() add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) self.assertTrue(torch.allclose(lowercase_,output + 1,atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A__ = SequentialHook(PostForwardHook(),PostForwardHook() ) add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) assert torch.allclose(lowercase_,output + 2,atol=1E-5 ) def snake_case__ ( self : Dict )-> Union[str, Any]: '''simple docstring''' A__ = ModelForTest() A__ = torch.randn(2,3 ) A__ = test_model(lowercase_ ) A__ = PostForwardHook() add_hook_to_module(lowercase_,lowercase_ ) A__ = test_model(lowercase_ ) self.assertTrue(torch.allclose(lowercase_,output + 1 ) ) self.assertTrue(outputa.requires_grad ) A__ = True A__ = test_model(lowercase_ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def snake_case__ ( self : Optional[int] )-> Any: '''simple docstring''' A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm,AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara,AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device,torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device,torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device,torch.device(0 ) ) self.assertEqual(model.lineara.weight.device,torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowercase_,AlignDevicesHook(io_same_device=lowercase_ ) ) A__ = torch.randn(2,3 ).to(0 ) A__ = model(lowercase_ ) self.assertEqual(output.device,torch.device(0 ) ) def snake_case__ ( self : Dict )-> Optional[Any]: '''simple docstring''' A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # This will move each submodule on different devices A__ = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara,AlignDevicesHook(**lowercase_ ) ) add_hook_to_module(model.batchnorm,AlignDevicesHook(**lowercase_ ) ) add_hook_to_module(model.lineara,AlignDevicesHook(**lowercase_ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device,lowercase_ ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # Now test with buffers included in the offload A__ = { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara,AlignDevicesHook(**lowercase_ ) ) add_hook_to_module(model.batchnorm,AlignDevicesHook(**lowercase_ ) ) add_hook_to_module(model.lineara,AlignDevicesHook(**lowercase_ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device,torch.device('meta' ) ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) def snake_case__ ( self : Tuple )-> Any: '''simple docstring''' A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowercase_,execution_device=lowercase_,offload=lowercase_ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(lowercase_ ) self.assertEqual(model.batchnorm.running_mean.device,lowercase_ ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase_ ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowercase_,execution_device=lowercase_,offload=lowercase_,offload_buffers=lowercase_ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device,torch.device('meta' ) ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowercase_ ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) def snake_case__ ( self : List[Any] )-> Union[str, Any]: '''simple docstring''' A__ = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # This will move each submodule on different devices A__ = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowercase_,execution_device=lowercase_,offload=lowercase_,weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A__ = torch.device(lowercase_ ) self.assertEqual(model.batchnorm.running_mean.device,lowercase_ ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase_ ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowercase_,execution_device=lowercase_,offload=lowercase_,weights_map=model.state_dict(),offload_buffers=lowercase_,) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device,torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device,torch.device('meta' ) ) A__ = torch.randn(2,3 ) A__ = model(lowercase_ ) self.assertEqual(output.device,lowercase_ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase_ ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device,torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device,torch.device('cpu' ) )
code_codestyle: 7
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=True lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=False lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) @cached_property def a_ ( self): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""") def a_ ( self): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowerCAmelCase , f.name) lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase) lowerCAmelCase = pickle.dumps(__lowerCAmelCase) pickle.loads(__lowerCAmelCase) def a_ ( self): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase) lowerCAmelCase 
= rust_tokenizer.tokenize(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = """Hello World!""" lowerCAmelCase = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCAmelCase = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
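# A standalone sketch (an illustration, not part of the test class above) of the
# slow/fast tokenizer parity the preceding tests verify; it assumes network
# access to the Hugging Face Hub to download "xlm-roberta-base".
from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast

slow_tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
fast_tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
sample = "I was born in 92000, and this is falsé."
assert slow_tokenizer.tokenize(sample) == fast_tokenizer.tokenize(sample)
assert slow_tokenizer.encode(sample) == fast_tokenizer.encode(sample)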
272
0
import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCAmelCase_ = logging.getLogger() def __SCREAMING_SNAKE_CASE (): snake_case_ = argparse.ArgumentParser() parser.add_argument('''-f''' ) snake_case_ = parser.parse_args() return args.f def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): snake_case_ = {} snake_case_ = os.path.join(SCREAMING_SNAKE_CASE__ , '''all_results.json''' ) if os.path.exists(SCREAMING_SNAKE_CASE__ ): with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: snake_case_ = json.load(SCREAMING_SNAKE_CASE__ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def __SCREAMING_SNAKE_CASE (): snake_case_ = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() lowerCAmelCase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case_ ( __A ): '''simple docstring''' @classmethod def snake_case__( cls : Optional[int] ) ->List[Any]: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ = tempfile.mkdtemp() snake_case_ = os.path.join(cls.tmpdir , '''default_config.yml''' ) write_basic_config(save_location=cls.configPath ) snake_case_ = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def snake_case__( cls : Dict ) ->str: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : int ) ->Optional[int]: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''glue_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : Any ) ->int: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertLess(result['''perplexity'''] , 1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''clm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : str ) ->Union[str, Any]: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertLess(result['''perplexity'''] , 4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''mlm_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : Tuple ) ->Union[str, Any]: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ = 7 if get_gpu_count() > 1 else 2 snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 ) self.assertLess(result['''train_loss'''] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''ner_no_trainer''' ) ) ) @unittest.skip(reason='''Fix me @muellerzr''' ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : Optional[int] ) ->str: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 2_8 ) self.assertGreaterEqual(result['''eval_exact'''] , 2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''qa_no_trainer''' ) ) ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''swag_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : str ) ->Any: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0 ) self.assertGreaterEqual(result['''eval_rouge2'''] , 2 ) self.assertGreaterEqual(result['''eval_rougeL'''] , 7 ) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''summarization_no_trainer''' ) ) ) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : Any ) ->List[str]: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_bleu'''] , 3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''epoch_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''translation_no_trainer''' ) ) ) @slow def snake_case__( self : Optional[Any] ) ->Union[str, Any]: snake_case_ = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' 
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 ) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} ) def snake_case__( self : Any ) ->Union[str, Any]: snake_case_ = self.get_auto_remove_tmp_dir() snake_case_ = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''' ) run_command(self._launch_args + testargs ) snake_case_ = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''step_1''' ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , '''image_classification_no_trainer''' ) ) )
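# A minimal sketch of the launch pattern every test above relies on: write a
# basic accelerate config once, then run each example script through
# `accelerate launch`. The config path and script path here are illustrative.
import subprocess
from accelerate.utils import write_basic_config

write_basic_config(save_location="default_config.yml")
subprocess.run(
    ["accelerate", "launch", "--config_file", "default_config.yml",
     "examples/pytorch/text-classification/run_glue_no_trainer.py", "--help"],
    check=True,
)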
8
'''simple docstring'''


def gcd(a: int, b: int) -> int:
    """Euclidean algorithm: greatest common divisor of a and b."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Extended Euclidean algorithm: multiplicative inverse of a modulo m."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
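# A quick sanity check of the helpers above: 3 * 4 = 12 ≡ 1 (mod 11), so the
# modular inverse of 3 modulo 11 is 4.
assert gcd(3, 11) == 1
assert mod_inverse(3, 11) == 4
assert (3 * mod_inverse(3, 11)) % 11 == 1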
272
0
import datasets from .evaluate import evaluate __lowerCAmelCase : int ='\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' __lowerCAmelCase : Tuple ='\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' __lowerCAmelCase : Any ='\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): '''simple docstring''' def __magic_name__( self :Tuple ) -> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': { '''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ), }, '''references''': { '''id''': datasets.Value('''string''' ), '''answers''': datasets.features.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), }, } ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , ) def 
__magic_name__( self :int , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[str] ) -> Any: __SCREAMING_SNAKE_CASE : str = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} __SCREAMING_SNAKE_CASE : Union[str, Any] = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] __SCREAMING_SNAKE_CASE : List[str] = evaluate(dataset=lowerCAmelCase__ , predictions=lowerCAmelCase__ ) return score
9
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def snake_case__ ( _A: jnp.ndarray , _A: int , _A: float = 1 , _A: float = 1 , _A: float = 1.0e4 , _A: bool = False , _A: float = 1.0 , ) -> jnp.ndarray: '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even" lowerCAmelCase = float(embedding_dim // 2 ) lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment ) lowerCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 ) # scale embeddings lowerCAmelCase = scale * emb if flip_sin_to_cos: lowerCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 ) else: lowerCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 ) lowerCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] ) return signal class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : jnp.dtype = jnp.floataa @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase) lowerCAmelCase = nn.silu(__lowerCAmelCase) lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase) return temb class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : bool = False UpperCAmelCase_ : float = 1 @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" return get_sinusoidal_embeddings( __lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
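# A short usage sketch of the embedding helper defined above; the module's own
# call site (the Timesteps-style class's __call__) refers to it as
# `get_sinusoidal_embeddings`, which is the name assumed here.
import jax.numpy as jnp

timesteps = jnp.array([0, 10, 100])             # one timestep per batch element
emb = get_sinusoidal_embeddings(timesteps, 32)  # embedding_dim must be even
assert emb.shape == (3, 32)                     # [batch, embedding_dim]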
272
0
__A = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" __A = [{"type": "code", "content": INSTALL_CONTENT}] __A = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
272
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['ConvNextFeatureExtractor'] lowerCAmelCase__ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
11
'''simple docstring'''
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Project Euler 86: smallest cuboid size M such that the number of
    cuboids with an integer shortest path, up to size M, exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
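# A check against the figure quoted in Project Euler problem 86: the least M
# for which the cuboid count first exceeds two thousand is M = 100, so with
# limit=2000 the search above should stop at 100.
assert solution(2000) == 100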
272
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase_ = { 'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ 'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST', 'NezhaForNextSentencePrediction', 'NezhaForMaskedLM', 'NezhaForPreTraining', 'NezhaForMultipleChoice', 'NezhaForQuestionAnswering', 'NezhaForSequenceClassification', 'NezhaForTokenClassification', 'NezhaModel', 'NezhaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
12
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowercase = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
272
0
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Tuple = ["a", "b", "c"] # Defaults to last layer if both are None SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = get_aligned_output_features_output_indices(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , ["c"]) self.assertEqual(lowerCAmelCase__ , [2]) # Out indices set to match out features SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = get_aligned_output_features_output_indices(["a", "c"] , lowerCAmelCase__ , lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , ["a", "c"]) self.assertEqual(lowerCAmelCase__ , [0, 2]) # Out features set to match out indices SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_aligned_output_features_output_indices(lowerCAmelCase__ , [0, 2] , lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , ["a", "c"]) self.assertEqual(lowerCAmelCase__ , [0, 2]) # Out features selected from negative indices SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = get_aligned_output_features_output_indices(lowerCAmelCase__ , [-3, -1] , lowerCAmelCase__) self.assertEqual(lowerCAmelCase__ , ["a", "c"]) self.assertEqual(lowerCAmelCase__ , [-3, -1]) def _SCREAMING_SNAKE_CASE ( self : str): # Stage names must be set with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(["a", "b"] , (0, 1) , lowerCAmelCase__) # Out features must be a list with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"]) # Out features must be a subset of stage names with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"]) # Out indices must be a list or tuple with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(lowerCAmelCase__ , 0 , ["a", "b"]) # Out indices must be a subset of stage names with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(lowerCAmelCase__ , (0, 1) , ["a"]) # Out features and out indices must be the same length with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"]) # Out features should match out indices with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"]) # Out features and out indices should be in order with self.assertRaises(lowerCAmelCase__): verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"]) # Check passes with valid inputs verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"]) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: List[str] = BackboneMixin() SCREAMING_SNAKE_CASE_: List[str] = ["a", "b", "c"] SCREAMING_SNAKE_CASE_: Optional[Any] = ["a", "c"] SCREAMING_SNAKE_CASE_: Optional[int] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ["a", "c"]) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly SCREAMING_SNAKE_CASE_: Optional[Any] = ["a", "b"] self.assertEqual(backbone.out_features , ["a", "b"]) self.assertEqual(backbone.out_indices , [0, 1]) SCREAMING_SNAKE_CASE_: Optional[Any] = [-3, -1] self.assertEqual(backbone.out_features , ["a", "c"]) self.assertEqual(backbone.out_indices , 
[-3, -1])
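# A small illustration of the alignment helper exercised above: given the stage
# names, either side can be derived from the other (here, features from indices).
out_features, out_indices = get_aligned_output_features_output_indices(
    None, [0, 2], ["a", "b", "c"]
)
assert out_features == ["a", "c"]
assert out_indices == [0, 2]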
13
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std def a_ ( self): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None def a_ ( self): """simple docstring""" lowerCAmelCase = DPTImageProcessingTester(self) @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""")) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""")) self.assertTrue(hasattr(__lowerCAmelCase , """size""")) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18}) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42}) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" 
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
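# A quick usage sketch of the processor under test, assuming Pillow and torch
# are available; a blank RGB image stands in for real input.
from PIL import Image
from transformers import DPTImageProcessor

processor = DPTImageProcessor(size={"height": 18, "width": 18})
image = Image.new("RGB", (30, 40))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)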
272
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : str = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase : List[str] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]: """simple docstring""" A__ = state_dict.pop(lowercase_ ) A__ = val def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: A__ = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) A__ = value else: A__ = value return new_state_dict def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> Dict: """simple docstring""" A__ = '''''' if is_panoptic: A__ = '''conditional_detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) A__ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) A__ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[:256, :] A__ = in_proj_bias[:256] A__ = in_proj_weight[256:512, :] A__ = in_proj_bias[256:512] A__ = in_proj_weight[-256:, :] A__ = in_proj_bias[-256:] def SCREAMING_SNAKE_CASE ( ) -> List[Any]: """simple docstring""" A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple: """simple docstring""" A__ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: A__ = '''resnet101''' if "dc5" in model_name: A__ = True A__ = '''panoptic''' in model_name if is_panoptic: A__ = 250 else: A__ = 91 A__ = '''huggingface/label-files''' A__ = '''coco-detection-id2label.json''' A__ = 
json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} # load image processor A__ = '''coco_panoptic''' if is_panoptic else '''coco_detection''' A__ = ConditionalDetrImageProcessor(format=lowercase_ ) # prepare image A__ = prepare_img() A__ = image_processor(images=lowercase_ , return_tensors='''pt''' ) A__ = encoding['''pixel_values'''] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub A__ = torch.hub.load('''DeppMeng/ConditionalDETR''' , lowercase_ , pretrained=lowercase_ ).eval() A__ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: A__ = '''conditional_detr.''' + src rename_key(lowercase_ , lowercase_ , lowercase_ ) A__ = rename_backbone_keys(lowercase_ ) # query, key and value matrices need special treatment read_in_q_k_v(lowercase_ , is_panoptic=lowercase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them A__ = '''conditional_detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''conditional_detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): A__ = state_dict.pop(lowercase_ ) A__ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: A__ = state_dict.pop(lowercase_ ) A__ = val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: A__ = state_dict.pop(lowercase_ ) A__ = val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): A__ = state_dict.pop(lowercase_ ) A__ = val # finally, create HuggingFace model and load state dict A__ = ConditionalDetrForSegmentation(lowercase_ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase_ ) model.load_state_dict(lowercase_ ) model.eval() model.push_to_hub(repo_id=lowercase_ , organization='''DepuMeng''' , commit_message='''Add model''' ) # verify our conversion A__ = conditional_detr(lowercase_ ) A__ = model(lowercase_ ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(lowercase_ ).mkdir(exist_ok=lowercase_ ) model.save_pretrained(lowercase_ ) image_processor.save_pretrained(lowercase_ ) if __name__ == "__main__": _lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _lowerCamelCase : Optional[Any] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
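# Example invocation of the conversion script above; the script filename is
# assumed from the transformers conversion-script naming convention, and the
# output path is illustrative.
# python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#     --model_name conditional_detr_resnet50 \
#     --pytorch_dump_folder_path ./conditional_detr_resnet50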
14
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str: '''simple docstring''' lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T return jnp.matmul(_A , norm_emb_a.T ) class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : CLIPConfig UpperCAmelCase_ : jnp.dtype = jnp.floataa def a_ ( self): """simple docstring""" lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config) lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype) lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim)) lowerCAmelCase = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,)) lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1] lowerCAmelCase = self.visual_projection(__lowerCAmelCase) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowerCAmelCase = 0.0 lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase) # Use a lower threshold if an image has any special care concept lowerCAmelCase = is_special_care * 0.01 lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = CLIPConfig UpperCAmelCase_ : Any = '''clip_input''' UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" if input_shape is None: lowerCAmelCase = (1, 224, 224, 3) lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase) super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase) lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""] return random_params def __call__( self 
, __lowerCAmelCase , __lowerCAmelCase = None , ): """simple docstring""" lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
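# A tiny check of the cosine-similarity helper defined at the top of this file;
# the module body calls it `jax_cosine_distance`, which is the name assumed here.
import jax.numpy as jnp

embeddings = jnp.array([[1.0, 0.0], [0.0, 2.0]])
similarity = jax_cosine_distance(embeddings, embeddings)  # rows L2-normalized, then matmul
assert similarity.shape == (2, 2)                         # diagonal entries are 1.0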
272
0
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants SCREAMING_SNAKE_CASE :Optional[Any] = Mapping[str, np.ndarray] SCREAMING_SNAKE_CASE :List[str] = Mapping[str, Any] # Is a nested dict. SCREAMING_SNAKE_CASE :int = 0.01 @dataclasses.dataclass(frozen=__SCREAMING_SNAKE_CASE ) class UpperCAmelCase : '''simple docstring''' snake_case_ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. snake_case_ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. snake_case_ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. snake_case_ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. snake_case_ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions snake_case_ = None # Optional remark about the protein. Included as a comment in output PDB # files snake_case_ = None # Templates used to generate this protein (prediction-only) snake_case_ = None # Chain corresponding to each parent snake_case_ = None def UpperCAmelCase ( a_ ) -> Protein: """simple docstring""" __A = r"(\[[A-Z]+\]\n)" __A = [tag.strip() for tag in re.split(a_ , a_ ) if len(a_ ) > 0] __A = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) __A = ["N", "CA", "C"] __A = None __A = None __A = None for g in groups: if "[PRIMARY]" == g[0]: __A = g[1][0].strip() for i in range(len(a_ ) ): if seq[i] not in residue_constants.restypes: __A = "X" # FIXME: strings are immutable __A = np.array( [residue_constants.restype_order.get(a_ , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: __A = [] for axis in range(3 ): tertiary.append(list(map(a_ , g[1][axis].split() ) ) ) __A = np.array(a_ ) __A = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(a_ ): __A = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: __A = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) __A = np.zeros( ( len(a_ ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(a_ ): __A = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=a_ , atom_mask=a_ , aatype=a_ , residue_index=np.arange(len(a_ ) ) , b_factors=a_ , ) def UpperCAmelCase ( a_ , a_ = 0 ) -> List[str]: """simple docstring""" __A = [] __A = prot.remark if remark is not None: pdb_headers.append(F'''REMARK {remark}''' ) __A = prot.parents __A = prot.parents_chain_index if parents is not None and parents_chain_index is not None: __A = [p for i, p in zip(a_ , a_ ) if i == chain_id] if parents is None or len(a_ ) == 0: __A = ["N/A"] pdb_headers.append(F'''PARENT {' '.join(a_ )}''' ) return pdb_headers def UpperCAmelCase ( a_ , a_ ) -> str: """simple docstring""" __A = [] __A = pdb_str.split("\n" ) __A = prot.remark if remark is not None: out_pdb_lines.append(F'''REMARK {remark}''' ) __A = 42 if prot.parents is not None and len(prot.parents ) > 0: __A = [] if prot.parents_chain_index is not None: __A = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(a_ 
) , [] ) parent_dict[str(a_ )].append(a_ ) __A = max([int(a_ ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): __A = parent_dict.get(str(a_ ) , ["N/A"] ) parents_per_chain.append(a_ ) else: parents_per_chain.append(list(prot.parents ) ) else: __A = [["N/A"]] def make_parent_line(a_ ) -> str: return F'''PARENT {' '.join(a_ )}''' out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) __A = 0 for i, l in enumerate(a_ ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(a_ ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(a_ ): __A = parents_per_chain[chain_counter] else: __A = ["N/A"] out_pdb_lines.append(make_parent_line(a_ ) ) return "\n".join(a_ ) def UpperCAmelCase ( a_ ) -> str: """simple docstring""" __A = residue_constants.restypes + ["X"] def res_atoa(a_ ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) __A = residue_constants.atom_types __A = [] __A = prot.atom_mask __A = prot.aatype __A = prot.atom_positions __A = prot.residue_index.astype(np.intaa ) __A = prot.b_factors __A = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) __A = get_pdb_headers(a_ ) if len(a_ ) > 0: pdb_lines.extend(a_ ) __A = aatype.shape[0] __A = 1 __A = 0 __A = string.ascii_uppercase __A = None # Add all atom sites. for i in range(a_ ): __A = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(a_ , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue __A = "ATOM" __A = atom_name if len(a_ ) == 4 else F''' {atom_name}''' __A = "" __A = "" __A = 1.00 __A = atom_name[0] # Protein supports only C, N, O, S, this works. __A = "" __A = "A" if chain_index is not None: __A = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! __A = ( F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}''' F'''{res_name_a:>3} {chain_tag:>1}''' F'''{residue_index[i]:>4}{insertion_code:>1} ''' F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}''' F'''{occupancy:>6.2f}{b_factor:>6.2f} ''' F'''{element:>2}{charge:>2}''' ) pdb_lines.append(a_ ) atom_index += 1 __A = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: __A = True __A = chain_index[i + 1] if should_terminate: # Close the chain. __A = "TER" __A = ( F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}''' ) pdb_lines.append(a_ ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(a_ , a_ ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(a_ ) def UpperCAmelCase ( a_ ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def UpperCAmelCase ( a_ , a_ , a_ = None , a_ = None , a_ = None , a_ = None , a_ = None , ) -> Protein: """simple docstring""" return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=a_ , remark=a_ , parents=a_ , parents_chain_index=a_ , )
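# Illustrative sketch only (added): the snippet above keeps OpenFold's `Protein` dataclass
# and `residue_constants` import, but its function names are mangled; `to_pdb` below is the
# assumed restored name of the PDB-writing function.
import numpy as np

num_res = 1  # a single residue of restype index 0, with every atom slot unmasked
prot = Protein(
    atom_positions=np.zeros((num_res, residue_constants.atom_type_num, 3), dtype=np.float32),
    atom_mask=np.ones((num_res, residue_constants.atom_type_num), dtype=np.float32),
    aatype=np.zeros((num_res,), dtype=np.int32),
    residue_index=np.arange(num_res) + 1,
    b_factors=np.zeros((num_res, residue_constants.atom_type_num), dtype=np.float32),
)
print(to_pdb(prot).splitlines()[0])  # first ATOM record of the rendered PDB text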
15
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = MvpTokenizer UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = filter_roberta_detectors def a_ ( self): """simple docstring""" super().setUp() lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase = {"""unk_token""": """<unk>"""} lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(__lowerCAmelCase)) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase): """simple docstring""" return "lower newer", "lower newer" @cached_property def a_ ( self): """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""") @cached_property def a_ ( self): """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""") @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) # Test that special tokens are reset @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""") # check if input_ids are returned and no labels self.assertIn("""input_ids""" , __lowerCAmelCase) self.assertIn("""attention_mask""" , __lowerCAmelCase) 
self.assertNotIn("""labels""" , __lowerCAmelCase) self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""") self.assertEqual(32 , targets["""input_ids"""].shape[1]) @require_torch def a_ ( self): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual(batch.input_ids.shape , (2, 1024)) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization."""] lowerCAmelCase = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""") lowerCAmelCase = inputs["""input_ids"""] lowerCAmelCase = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = """A, <mask> AllenNLP sentence.""" lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
272
0
"""simple docstring""" from datetime import datetime import requests def __UpperCAmelCase ( __lowerCamelCase ) -> bytes: lowercase__ : List[str] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' lowercase__ : Optional[int] = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(__lowerCamelCase ).content if __name__ == "__main__": lowerCAmelCase_ = input('Enter Video/IGTV url: ').strip() lowerCAmelCase_ = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(F'''Done. Video saved to disk as {file_name}.''')
16
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a__( enum.Enum ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 2 @add_end_docstrings(lowerCAmelCase__ ) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCAmelCase = None if self.model.config.prefix is not None: lowerCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params) lowerCAmelCase = {**self._preprocess_params, **preprocess_params} lowerCAmelCase = {**self._forward_params, **forward_params} def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = {} if prefix is not None: lowerCAmelCase = prefix if prefix: lowerCAmelCase = self.tokenizer( __lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" """ [None, 'hole']""") lowerCAmelCase = handle_long_generation preprocess_params.update(__lowerCAmelCase) lowerCAmelCase = generate_kwargs lowerCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""") if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.TENSORS if return_type is not None: lowerCAmelCase = return_type if clean_up_tokenization_spaces is not None: lowerCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) if len(__lowerCAmelCase) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""") lowerCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True}) return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase) def __call__( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.tokenizer( prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prompt_text if handle_long_generation == "hole": lowerCAmelCase = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCAmelCase = generate_kwargs["""max_new_tokens"""] else: lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""") if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""") lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:] return inputs def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = model_inputs["""input_ids"""] lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 1 else: lowerCAmelCase = input_ids.shape[0] lowerCAmelCase = model_inputs.pop("""prompt_text""") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0) if prefix_length > 0: lowerCAmelCase = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCAmelCase = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = generated_sequence.shape[0] if self.framework == "pt": lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True): """simple docstring""" lowerCAmelCase = model_outputs["""generated_sequence"""][0] lowerCAmelCase = model_outputs["""input_ids"""] lowerCAmelCase = model_outputs["""prompt_text"""] lowerCAmelCase = generated_sequence.numpy().tolist() lowerCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCAmelCase = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCAmelCase = self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCAmelCase = 0 else: lowerCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: lowerCAmelCase = prompt_text + text[prompt_length:] else: lowerCAmelCase = text[prompt_length:] lowerCAmelCase = {"""generated_text""": all_text} records.append(__lowerCAmelCase) return records
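# A short usage sketch (added) for the text-generation pipeline implemented above;
# "gpt2" is just an assumed example checkpoint, not something the source pins down.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Once upon a time,", max_new_tokens=20)[0]["generated_text"])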
272
0
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Optional[int] ): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights __lowercase = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__, cache_dir=UpperCAmelCase__ ) __lowercase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__, os.listdir(UpperCAmelCase__ )[0], "snapshots" ) )] __lowercase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin" ) for f in files ) @slow @require_flax class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : List[Any] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 4 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 6_4, 6_4, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3 assert np.abs(np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 49_947.875 ) < 5E-1 __lowercase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(UpperCAmelCase__ ) == num_samples def _lowercase ( self : int ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], 
dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_383_808.2) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__ ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa ) __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_373_516.75) ) < 5E-1 def _lowercase ( self : Optional[int] ): __lowercase = FlaxDDIMScheduler( beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=UpperCAmelCase__, steps_offset=1, ) __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, scheduler=UpperCAmelCase__, safety_checker=UpperCAmelCase__, ) __lowercase = scheduler.create_state() __lowercase = scheduler_state __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.random.PRNGKey(0 ) __lowercase = 5_0 __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) # shard inputs and rng __lowercase = replicate(UpperCAmelCase__ ) __lowercase = jax.random.split(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 
5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3 assert np.abs((np.abs(UpperCAmelCase__, dtype=np.floataa ).sum() - 2_347_693.5) ) < 5E-1 def _lowercase ( self : Dict ): __lowercase = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) __lowercase = jax.device_count() __lowercase = num_samples * [prompt] __lowercase = jax.random.split(jax.random.PRNGKey(0 ), UpperCAmelCase__ ) __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, ) __lowercase = replicate(UpperCAmelCase__ ) __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) __lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1] # With memory efficient attention __lowercase ,__lowercase = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloataa, safety_checker=UpperCAmelCase__, use_memory_efficient_attention=UpperCAmelCase__, ) __lowercase = replicate(UpperCAmelCase__ ) __lowercase = pipeline.prepare_inputs(UpperCAmelCase__ ) __lowercase = shard(UpperCAmelCase__ ) __lowercase = pipeline(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, jit=UpperCAmelCase__ ).images assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) __lowercase = images[2, 0, 2_5_6, 1_0:1_7, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1E-2
17
def z_function(input_str: str) -> list[int]:
    """For each index i, compute the length of the longest substring starting at i
    that is also a prefix of the whole string (the classic Z-array)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is a starting position of a substring equal to the pattern
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
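# Quick demonstration (added, not in the original file):
print(z_function("abracadabra"))
# -> [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
print(find_pattern("abr", "abracadabra"))
# -> 2, since "abr" starts at indices 0 and 7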
272
0
def count_divisors(n: int) -> int:
    """Count the divisors of n via its prime factorization:
    if n = p1**a1 * ... * pk**ak, the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution() -> int:
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
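# Two quick checks (added, not in the original file):
assert count_divisors(28) == 6  # 28 = 2**2 * 7 -> (2 + 1) * (1 + 1) = 6 divisors
print(solution())  # 76576500, the first triangle number with more than 500 divisors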
18
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    r"""
    Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single
    processor class.
    """

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the
        # **non-padding** token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
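# Hedged usage sketch (added): pairing this processor with a MusicGen checkpoint;
# "facebook/musicgen-small" is an assumed compatible model id.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
print(inputs["input_ids"].shape)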
272
0
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be constructed by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because the empty string has the empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    # adds the word to every combination the current position holds;
                    # then push those combinations to table[i + len(word)]
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order, so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
19
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
272
0
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
20
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize(" hello world")
    ' hello world'
    >>> capitalize("a")
    'A'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""

    # Map each lowercase letter to its uppercase counterpart, and capitalize the
    # first character only if it is a lowercase letter.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
272
0
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the string is a valid Sri Lankan mobile phone number.

    >>> is_sri_lankan_phone_number("+94773283048")
    True
    >>> is_sri_lankan_phone_number("0718382399")
    True
    >>> is_sri_lankan_phone_number("0094702343221")
    True
    >>> is_sri_lankan_phone_number("075469322")
    False
    """
    pattern = re.compile(r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
21
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
272
0
'''simple docstring''' # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def UpperCAmelCase_ ( __lowercase : int , __lowercase : Optional[int] , __lowercase : Union[str, Any] ) -> str: '''simple docstring''' _UpperCAmelCase = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] _UpperCAmelCase = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } _UpperCAmelCase = f'{src_lang}-{tgt_lang}' _UpperCAmelCase = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(__lowercase , exist_ok=__lowercase ) _UpperCAmelCase = os.path.join(__lowercase , "README.md" ) print(f'Generating {path}' ) with open(__lowercase , "w" , encoding="utf-8" ) as f: f.write(__lowercase ) # make sure we are under the root of the project __SCREAMING_SNAKE_CASE :Optional[Any] = Path(__file__).resolve().parent.parent.parent __SCREAMING_SNAKE_CASE :Any = repo_dir / '''model_cards''' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Optional[Any] = model_name.split('''-''') __SCREAMING_SNAKE_CASE :List[str] = model_cards_dir / '''facebook''' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
22
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,  # pad inputs to max length with silence token (zero) and no attention mask
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
272
0
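The row above carries a Whisper-style log-mel feature extractor. A minimal, self-contained sketch of the same recipe, assuming a mono float32 waveform at 16 kHz and the class defaults (n_fft=400, hop_length=160, 80 mel bins); it reuses the transformers helpers the snippet itself imports:

import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

# One second of a 440 Hz tone as a stand-in waveform.
audio = np.sin(2 * np.pi * 440.0 * np.arange(16000) / 16000).astype(np.float32)
mel_filters = mel_filter_bank(
    num_frequency_bins=1 + 400 // 2,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=16000,
    norm="slaney",
    mel_scale="slaney",
)
log_spec = spectrogram(
    audio,
    window_function(400, "hann"),
    frame_length=400,
    hop_length=160,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)[:, :-1]  # drop the last frame, as the extractor does
log_spec = np.maximum(log_spec, log_spec.max() - 8.0)  # clamp the dynamic range
log_spec = (log_spec + 4.0) / 4.0  # rescale roughly into [-1, 1]
print(log_spec.shape)  # (80, 100)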
'''simple docstring''' from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE: """simple docstring""" def __init__( self : Tuple , __snake_case : int ) -> None: UpperCAmelCase : str = num_of_nodes UpperCAmelCase : list[list[int]] = [] UpperCAmelCase : dict[int, int] = {} def A ( self : List[str] , __snake_case : int , __snake_case : int , __snake_case : int ) -> None: self.m_edges.append([u_node, v_node, weight] ) def A ( self : Union[str, Any] , __snake_case : int ) -> int: if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def A ( self : Any , __snake_case : int ) -> None: if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase : int = self.find_component(__snake_case ) def A ( self : Dict , __snake_case : list[int] , __snake_case : int , __snake_case : int ) -> None: if component_size[u_node] <= component_size[v_node]: UpperCAmelCase : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(__snake_case ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase : Optional[Any] = self.find_component(__snake_case ) component_size[u_node] += component_size[v_node] self.set_component(__snake_case ) def A ( self : Optional[int] ) -> None: UpperCAmelCase : str = [] UpperCAmelCase : Any = 0 UpperCAmelCase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase : Union[str, Any] = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = edge UpperCAmelCase : List[Any] = self.m_component[u] UpperCAmelCase : Any = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCAmelCase : Union[str, Any] = [u, v, w] for edge in minimum_weight_edge: if isinstance(__snake_case , __snake_case ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = edge UpperCAmelCase : int = self.m_component[u] UpperCAmelCase : Optional[int] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(__snake_case , __snake_case , __snake_case ) print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 UpperCAmelCase : str = [-1] * self.m_num_of_nodes print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def snake_case_ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
23
'''simple docstring''' from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __lowercase = logging.get_logger(__name__) __lowercase = '''T5Config''' class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''mt5''' UpperCAmelCase_ : Tuple = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[Any] = '''mt5''' UpperCAmelCase_ : int = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Tuple = '''mt5''' UpperCAmelCase_ : Union[str, Any] = MTaConfig
272
0
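The graph class opening this row implements a Boruvka-style minimum spanning tree, but its class and method names are mangled in the dump (every method is named A). A de-obfuscated sketch of the same per-component cheapest-edge loop, assuming the same [u, v, weight] edge format:

def boruvka_mst(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        # Walk up to the component representative.
        while parent[u] != u:
            u = parent[u]
        return u

    mst_weight, components = 0, num_nodes
    while components > 1:
        cheapest = [-1] * num_nodes
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for r in (ru, rv):
                    if cheapest[r] == -1 or cheapest[r][2] > w:
                        cheapest[r] = [u, v, w]
        for entry in cheapest:
            if entry != -1:
                u, v, w = entry
                ru, rv = find(u), find(v)
                if ru != rv:
                    mst_weight += w
                    parent[ru] = rv
                    components -= 1
    return mst_weight


# A square with one diagonal: the MST picks weights 1 + 2 + 3 = 6.
assert boruvka_mst(4, [[0, 1, 1], [1, 2, 2], [2, 3, 3], [3, 0, 4], [0, 2, 5]]) == 6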
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Print the permutation once every position has been filled.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True  # mark the element as consumed before recursing
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False  # backtrack


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
24
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowercase = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''ernie_m''' UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = classifier_dropout lowerCAmelCase = is_decoder lowerCAmelCase = act_dropout
272
0
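The configuration class above is ERNIE-M's; its public name in transformers (an assumption here, since the dump mangles class names) is ErnieMConfig. A short usage sketch, requiring a transformers release that ships the model:

from transformers import ErnieMConfig

config = ErnieMConfig(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)
print(config.model_type)  # "ernie_m"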
"""simple docstring""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } UpperCAmelCase__ : int = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } UpperCAmelCase__ : Optional[int] = { 'facebook/blenderbot_small-90M': 5_1_2, } class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES __UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Union[str, Any] = BlenderbotSmallTokenizer def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__="<|endoftext|>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ) -> List[str]: """simple docstring""" super().__init__( ByteLevelBPETokenizer( vocab=SCREAMING_SNAKE_CASE__ , merges=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , ) , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : Dict = add_prefix_space def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
25
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __lowercase = logging.getLogger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Any = '''sequence-classification''' def __init__( self , __lowerCAmelCase): """simple docstring""" if type(__lowerCAmelCase) == dict: lowerCAmelCase = Namespace(**__lowerCAmelCase) lowerCAmelCase = glue_output_modes[hparams.task] lowerCAmelCase = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return self.model(**__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase = outputs[0] lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""] lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def a_ ( self): """simple docstring""" lowerCAmelCase = self.hparams lowerCAmelCase = processors[args.task]() lowerCAmelCase = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase = self._feature_file(__lowerCAmelCase) if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir) lowerCAmelCase = ( processor.get_dev_examples(args.data_dir) if mode == """dev""" else processor.get_train_examples(args.data_dir) ) lowerCAmelCase = convert_examples_to_features( __lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __lowerCAmelCase) torch.save(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False): """simple docstring""" lowerCAmelCase = """dev""" if mode == """test""" else mode lowerCAmelCase = self._feature_file(__lowerCAmelCase) logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) lowerCAmelCase = torch.load(__lowerCAmelCase) lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(__lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , ) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = outputs[:2] lowerCAmelCase = logits.detach().cpu().numpy() lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item() lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = np.squeeze(__lowerCAmelCase) lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0) lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)} lowerCAmelCase = dict(results.items()) lowerCAmelCase = results return ret, preds_list, out_label_list def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def a_ ( __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""") return parser def snake_case__ ( ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase = argparse.ArgumentParser() add_generic_args(_A , os.getcwd() ) lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() ) lowerCAmelCase = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase = os.path.join( """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) lowerCAmelCase = GLUETransformer(_A ) lowerCAmelCase = generic_train(_A , _A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) ) lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_A ) if __name__ == "__main__": main()
272
0
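The fast tokenizer defined at the top of this row is BlenderbotSmall's; the public class in transformers is BlenderbotSmallTokenizerFast. A usage sketch (downloads the facebook/blenderbot_small-90M files referenced in the vocab maps):

from transformers import BlenderbotSmallTokenizerFast

tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("sam is a great name")["input_ids"]
print(tokenizer.decode(ids))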
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
26
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
272
0
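The shim above only warns and forwards to the image processor; new code should construct it directly. A minimal sketch (SenseTime/deformable-detr is a hosted checkpoint; loading it requires network access):

from transformers import DeformableDetrImageProcessor

image_processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
# inputs = image_processor(images=pil_image, return_tensors="pt")  # pil_image: a PIL.Image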
'''simple docstring''' import argparse import torch from transformers import ( UniSpeechSatConfig, UniSpeechSatForAudioFrameClassification, UniSpeechSatForSequenceClassification, UniSpeechSatForXVector, WavaVecaFeatureExtractor, logging, ) logging.set_verbosity_info() __lowercase : Union[str, Any] = logging.get_logger(__name__) def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Any = UniSpeechSatForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE ) __a : Union[str, Any] = downstream_dict['projector.weight'] __a : Dict = downstream_dict['projector.bias'] __a : int = downstream_dict['model.post_net.linear.weight'] __a : List[str] = downstream_dict['model.post_net.linear.bias'] return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : str ): __a : Tuple = UniSpeechSatForAudioFrameClassification.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE ) __a : Tuple = downstream_dict['model.linear.weight'] __a : str = downstream_dict['model.linear.bias'] return model def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple ): __a : Union[str, Any] = UniSpeechSatForXVector.from_pretrained(_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE ) __a : List[Any] = downstream_dict['connector.weight'] __a : Union[str, Any] = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __a : List[Any] = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] __a : str = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] __a : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] __a : List[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] __a : Optional[int] = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] __a : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] __a : List[str] = downstream_dict['objective.W'] return model @torch.no_grad() def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] ): __a : Tuple = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' ) __a : List[Any] = checkpoint['Downstream'] __a : Any = UniSpeechSatConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) __a : Tuple = WavaVecaFeatureExtractor.from_pretrained( _SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE ) __a : str = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): __a : Any = convert_classification(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif arch.endswith('ForAudioFrameClassification' ): __a : Union[str, Any] = convert_diarization(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif arch.endswith('ForXVector' ): __a : List[Any] = convert_xvector(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: __a : int = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if 
__name__ == "__main__": __lowercase : int = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowercase : Optional[int] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
27
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowercase = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
272
0
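With the lazy import table above in place, the public InstructBLIP classes resolve on first attribute access. A minimal sketch (assumes torch is installed and the Salesforce/instructblip-vicuna-7b checkpoint is reachable):

from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")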
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters _lowerCamelCase : int = (720, 1280) # Height, Width _lowerCamelCase : List[str] = (0.4, 0.6) # if height or width lower than this scale, drop it. _lowerCamelCase : int = 1 / 100 _lowerCamelCase : Optional[Any] = "" _lowerCamelCase : str = "" _lowerCamelCase : str = "" _lowerCamelCase : str = 250 def __lowerCamelCase ( ) -> None: """simple docstring""" UpperCamelCase , UpperCamelCase = get_dataset(A__ , A__ ) for index in range(A__ ): UpperCamelCase = random.sample(range(len(A__ ) ) , 4 ) UpperCamelCase , UpperCamelCase , UpperCamelCase = update_image_and_anno( A__ , A__ , A__ , A__ , A__ , filter_scale=A__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase = random_chars(32 ) UpperCamelCase = path.split(os.sep )[-1].rsplit('.' , 1 )[0] UpperCamelCase = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) UpperCamelCase = [] for anno in new_annos: UpperCamelCase = anno[3] - anno[1] UpperCamelCase = anno[4] - anno[2] UpperCamelCase = anno[1] + width / 2 UpperCamelCase = anno[2] + height / 2 UpperCamelCase = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(A__ ) with open(F"""{file_root}.txt""" , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def __lowerCamelCase ( A__ , A__ ) -> tuple[list, list]: """simple docstring""" UpperCamelCase = [] UpperCamelCase = [] for label_file in glob.glob(os.path.join(A__ , '*.txt' ) ): UpperCamelCase = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(A__ ) as in_file: UpperCamelCase = in_file.readlines() UpperCamelCase = os.path.join(A__ , F"""{label_name}.jpg""" ) UpperCamelCase = [] for obj_list in obj_lists: UpperCamelCase = obj_list.rstrip('\n' ).split(' ' ) UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2 UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2 UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2 UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(A__ ) labels.append(A__ ) return img_paths, labels def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__ = 0.0 , ) -> tuple[list, list, str]: """simple docstring""" UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) UpperCamelCase = int(scale_x * output_size[1] ) UpperCamelCase = int(scale_y * output_size[0] ) UpperCamelCase = [] UpperCamelCase = [] for i, index in enumerate(A__ ): UpperCamelCase = all_img_list[index] path_list.append(A__ ) UpperCamelCase = all_annos[index] UpperCamelCase = cva.imread(A__ ) if i == 0: # top-left UpperCamelCase = cva.resize(A__ , (divid_point_x, divid_point_y) ) UpperCamelCase = img for bbox in img_annos: UpperCamelCase = bbox[1] * scale_x UpperCamelCase = bbox[2] * scale_y UpperCamelCase = bbox[3] * scale_x UpperCamelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right UpperCamelCase = cva.resize(A__ , (output_size[1] - divid_point_x, divid_point_y) ) UpperCamelCase = img for bbox in img_annos: UpperCamelCase = scale_x + bbox[1] * (1 - scale_x) 
UpperCamelCase = bbox[2] * scale_y UpperCamelCase = scale_x + bbox[3] * (1 - scale_x) UpperCamelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left UpperCamelCase = cva.resize(A__ , (divid_point_x, output_size[0] - divid_point_y) ) UpperCamelCase = img for bbox in img_annos: UpperCamelCase = bbox[1] * scale_x UpperCamelCase = scale_y + bbox[2] * (1 - scale_y) UpperCamelCase = bbox[3] * scale_x UpperCamelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right UpperCamelCase = cva.resize( A__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) UpperCamelCase = img for bbox in img_annos: UpperCamelCase = scale_x + bbox[1] * (1 - scale_x) UpperCamelCase = scale_y + bbox[2] * (1 - scale_y) UpperCamelCase = scale_x + bbox[3] * (1 - scale_x) UpperCamelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: UpperCamelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __lowerCamelCase ( A__ ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase = ascii_lowercase + digits return "".join(random.choice(A__ ) for _ in range(A__ ) ) if __name__ == "__main__": main() print("DONE ✅")
28
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class a__( unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = (3, 32, 128) lowerCAmelCase = tempfile.mkdtemp() # fmt: off lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") lowerCAmelCase = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self): """simple docstring""" shutil.rmtree(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1)) return image_input def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") 
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0) lowerCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""") lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = processor(text=__lowerCAmelCase) lowerCAmelCase = tokenizer(__lowerCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase): processor() def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.char_decode(__lowerCAmelCase) lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase) lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = None lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = torch.randn(1 , 27 , 38) lowerCAmelCase = torch.randn(1 , 27 , 50257) lowerCAmelCase = torch.randn(1 , 27 , 30522) lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input]) 
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
272
0
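A worked example of the top-left tile arithmetic in update_image_and_anno from the mosaic script above: the tile is resized to (divid_point_x, divid_point_y), so its normalized boxes shrink by the same scale factors.

scale_x, scale_y = 0.5, 0.5
output_h, output_w = 720, 1280  # OUTPUT_SIZE is (height, width)
divid_point_x = int(scale_x * output_w)  # 640
divid_point_y = int(scale_y * output_h)  # 360
bbox = [0, 0.2, 0.4, 0.6, 0.8]  # [label, xmin, ymin, xmax, ymax], normalized
new_bbox = [bbox[0], bbox[1] * scale_x, bbox[2] * scale_y, bbox[3] * scale_x, bbox[4] * scale_y]
print(new_bbox)  # [0, 0.1, 0.2, 0.3, 0.4]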
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : str = ViTImageProcessor if is_vision_available() else None @property def __UpperCAmelCase ( self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : str = (3, 3_2, 1_2_8) UpperCAmelCase_ : int = tempfile.mkdtemp() # fmt: off UpperCAmelCase_ : int = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on UpperCAmelCase_ : Optional[int] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) ) UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_UpperCamelCase ) + '\n' ) UpperCAmelCase_ : Dict = { 'do_normalize': False, 'do_resize': True, 'image_processor_type': 'ViTImageProcessor', 'resample': 3, 'size': {'height': 3_2, 'width': 1_2_8}, } UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , _UpperCamelCase ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Dict: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def __UpperCAmelCase ( self , **_UpperCamelCase ) -> List[Any]: return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ) def __UpperCAmelCase ( self ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : List[str] = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta ) UpperCAmelCase_ : Any = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) return image_input def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : int = self.get_tokenizer() UpperCAmelCase_ : int = self.get_image_processor() UpperCAmelCase_ : Optional[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_image_processor() UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) 
UpperCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 ) UpperCAmelCase_ : List[str] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : Union[str, Any] = self.get_image_processor() UpperCAmelCase_ : Tuple = self.get_tokenizer() UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : int = self.prepare_image_inputs() UpperCAmelCase_ : List[Any] = image_processor(_UpperCamelCase , return_tensors='np' ) UpperCAmelCase_ : Tuple = processor(images=_UpperCamelCase , return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_image_processor() UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : List[str] = 'test' UpperCAmelCase_ : Union[str, Any] = processor(text=_UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = tokenizer(_UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCAmelCase ( self ) -> Optional[Any]: UpperCAmelCase_ : Tuple = self.get_image_processor() UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : str = 'test' UpperCAmelCase_ : str = self.prepare_image_inputs() UpperCAmelCase_ : str = processor(text=_UpperCamelCase , images=_UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] ) # test if it raises when no input is passed with pytest.raises(_UpperCamelCase ): processor() def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.get_image_processor() UpperCAmelCase_ : str = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase_ : Tuple = processor.char_decode(_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = tokenizer.batch_decode(_UpperCamelCase ) UpperCAmelCase_ : Dict = [seq.replace(' ' , '' ) for seq in decoded_tok] self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def __UpperCAmelCase ( self ) -> Tuple: UpperCAmelCase_ : Dict = self.get_image_processor() UpperCAmelCase_ : str = self.get_tokenizer() UpperCAmelCase_ : Tuple = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : str = self.prepare_image_inputs() UpperCAmelCase_ : Optional[int] = processor(text=_UpperCamelCase , images=_UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def __UpperCAmelCase ( self ) -> str: UpperCAmelCase_ : Any = self.get_image_processor() 
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = torch.randn(1 , 2_7 , 3_8 ) UpperCAmelCase_ : List[Any] = torch.randn(1 , 2_7 , 5_0_2_5_7 ) UpperCAmelCase_ : List[Any] = torch.randn(1 , 2_7 , 3_0_5_2_2 ) UpperCAmelCase_ : Any = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
29
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=True lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=False lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) @cached_property def a_ ( self): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""") def a_ ( self): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowerCAmelCase , f.name) lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase) lowerCAmelCase = pickle.dumps(__lowerCAmelCase) pickle.loads(__lowerCAmelCase) def a_ ( self): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase) lowerCAmelCase 
= rust_tokenizer.tokenize(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = """Hello World!""" lowerCAmelCase = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCAmelCase = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
272
0
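The suites above pin down XLM-R tokenizer behaviour; the equivalent direct call looks like this (requires sentencepiece and network access):

from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
print(tokenizer.encode("Hello World!"))  # [0, 35378, 6661, 38, 2], matching the slow test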
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            # Relax the longest distance along edge (vertex, x).
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)

    print(max(long_dist))


# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
30
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclid: track (u1, u2, u3) and (v1, v2, v3) so that
    # u1 * a + u2 * m == u3 holds throughout the loop.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
272
0
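A quick sanity check for find_mod_inverse above: 7 * 15 = 105 = 4 * 26 + 1, so the inverse of 7 modulo 26 is 15.

assert find_mod_inverse(7, 26) == 15
assert (7 * find_mod_inverse(7, 26)) % 26 == 1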
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
31
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def snake_case__ ( _A: jnp.ndarray , _A: int , _A: float = 1 , _A: float = 1 , _A: float = 1.0e4 , _A: bool = False , _A: float = 1.0 , ) -> jnp.ndarray: '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even" lowerCAmelCase = float(embedding_dim // 2 ) lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment ) lowerCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 ) # scale embeddings lowerCAmelCase = scale * emb if flip_sin_to_cos: lowerCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 ) else: lowerCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 ) lowerCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] ) return signal class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : jnp.dtype = jnp.floataa @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase) lowerCAmelCase = nn.silu(__lowerCAmelCase) lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase) return temb class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : bool = False UpperCAmelCase_ : float = 1 @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" return get_sinusoidal_embeddings( __lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
272
0
import argparse import os import re import tensorflow as tf import torch from transformers import BertConfig, BertModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : int , __A : Optional[int] ) -> Tuple: """simple docstring""" a_ : Tuple = os.path.abspath(__A ) logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" ) # Load weights from TF model a_ : List[str] = tf.train.list_variables(__A ) a_ : Dict = [] a_ : str = [] a_ : List[Any] = [] for full_name, shape in init_vars: # logger.info(f"Loading TF weight {name} with shape {shape}") a_ : Union[str, Any] = full_name.split('/' ) if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]: logger.info(F"""Skipping non-model layer {full_name}""" ) continue if "optimizer" in full_name: logger.info(F"""Skipping optimization layer {full_name}""" ) continue if name[0] == "model": # ignore initial 'model' a_ : Dict = name[1:] # figure out how many levels deep the name is a_ : str = 0 for _name in name: if _name.startswith('layer_with_weights' ): depth += 1 else: break layer_depth.append(__A ) # read data a_ : Any = tf.train.load_variable(__A , __A ) names.append('/'.join(__A ) ) arrays.append(__A ) logger.info(F"""Read a total of {len(__A ):,} layers""" ) # Sanity check if len(set(__A ) ) != 1: raise ValueError(F"""Found layer names with different depths (layer depth {list(set(__A ) )})""" ) a_ : Union[str, Any] = list(set(__A ) )[0] if layer_depth != 1: raise ValueError( 'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP' ' heads.' ) # convert layers logger.info('Converting weights...' 
) for full_name, array in zip(__A , __A ): a_ : List[str] = full_name.split('/' ) a_ : List[str] = model a_ : int = [] for i, m_name in enumerate(__A ): if m_name == ".ATTRIBUTES": # variable names end with .ATTRIBUTES/VARIABLE_VALUE break if m_name.startswith('layer_with_weights' ): a_ : Optional[Any] = int(m_name.split('-' )[-1] ) if layer_num <= 2: # embedding layers # layer_num 0: word_embeddings # layer_num 1: position_embeddings # layer_num 2: token_type_embeddings continue elif layer_num == 3: # embedding LayerNorm trace.extend(['embeddings', 'LayerNorm'] ) a_ : List[str] = getattr(__A , 'embeddings' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif layer_num > 3 and layer_num < config.num_hidden_layers + 4: # encoder layers trace.extend(['encoder', 'layer', str(layer_num - 4 )] ) a_ : Optional[int] = getattr(__A , 'encoder' ) a_ : Union[str, Any] = getattr(__A , 'layer' ) a_ : List[str] = pointer[layer_num - 4] elif layer_num == config.num_hidden_layers + 4: # pooler layer trace.extend(['pooler', 'dense'] ) a_ : str = getattr(__A , 'pooler' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "embeddings": trace.append('embeddings' ) a_ : Optional[int] = getattr(__A , 'embeddings' ) if layer_num == 0: trace.append('word_embeddings' ) a_ : int = getattr(__A , 'word_embeddings' ) elif layer_num == 1: trace.append('position_embeddings' ) a_ : List[str] = getattr(__A , 'position_embeddings' ) elif layer_num == 2: trace.append('token_type_embeddings' ) a_ : str = getattr(__A , 'token_type_embeddings' ) else: raise ValueError(F"""Unknown embedding layer with name {full_name}""" ) trace.append('weight' ) a_ : Any = getattr(__A , 'weight' ) elif m_name == "_attention_layer": # self-attention layer trace.extend(['attention', 'self'] ) a_ : Dict = getattr(__A , 'attention' ) a_ : Optional[int] = getattr(__A , 'self' ) elif m_name == "_attention_layer_norm": # output attention norm trace.extend(['attention', 'output', 'LayerNorm'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : Dict = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_attention_output_dense": # output attention dense trace.extend(['attention', 'output', 'dense'] ) a_ : Optional[int] = getattr(__A , 'attention' ) a_ : int = getattr(__A , 'output' ) a_ : List[Any] = getattr(__A , 'dense' ) elif m_name == "_output_dense": # output dense trace.extend(['output', 'dense'] ) a_ : Any = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output dense trace.extend(['output', 'LayerNorm'] ) a_ : Tuple = getattr(__A , 'output' ) a_ : Any = getattr(__A , 'LayerNorm' ) elif m_name == "_key_dense": # attention key trace.append('key' ) a_ : Optional[int] = getattr(__A , 'key' ) elif m_name == "_query_dense": # attention query trace.append('query' ) a_ : Tuple = getattr(__A , 'query' ) elif m_name == "_value_dense": # attention value trace.append('value' ) a_ : Any = getattr(__A , 'value' ) elif m_name == "_intermediate_dense": # attention intermediate dense trace.extend(['intermediate', 'dense'] ) a_ : Any = getattr(__A , 'intermediate' ) a_ : Optional[int] = getattr(__A , 'dense' ) elif m_name == "_output_layer_norm": # output layer norm trace.append('output' ) a_ : Optional[int] = getattr(__A , 'output' ) # weights & biases elif m_name in ["bias", "beta"]: trace.append('bias' ) a_ : Any = getattr(__A , 'bias' ) elif m_name in ["kernel", "gamma"]: trace.append('weight' ) a_ : str = getattr(__A , 'weight' ) else: logger.warning(F"""Ignored {m_name}""" ) # 
for certain layers reshape is necessary a_ : Union[str, Any] = '.'.join(__A ) if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , __A ) or re.match( R'(\S+)\.attention\.output\.dense\.weight' , __A ): a_ : Dict = array.reshape(pointer.data.shape ) if "kernel" in full_name: a_ : Optional[Any] = array.transpose() if pointer.shape == array.shape: a_ : Tuple = torch.from_numpy(__A ) else: raise ValueError( F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:""" F""" {array.shape}""" ) logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" ) return model def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Optional[int] , __A : List[str] ) -> List[Any]: """simple docstring""" logger.info(F"""Loading model based on config from {config_path}...""" ) a_ : str = BertConfig.from_json_file(__A ) a_ : Optional[Any] = BertModel(__A ) # Load weights from checkpoint logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" ) load_tfa_weights_in_bert(__A , __A , __A ) # Save pytorch-model logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" ) torch.save(model.state_dict() , __A ) if __name__ == "__main__": UpperCAmelCase_ : Any = argparse.ArgumentParser() parser.add_argument( '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.' ) parser.add_argument( '--bert_config_file', type=str, required=True, help='The config json file corresponding to the BERT model. This specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', type=str, required=True, help='Path to the output PyTorch model (must include filename).', ) UpperCAmelCase_ : Tuple = parser.parse_args() convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
32
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
0
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class _UpperCAmelCase ( _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = BioGptTokenizer SCREAMING_SNAKE_CASE_ : int = False def A ( self : Any ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] lowercase_ : Dict = dict(zip(A , range(len(A ) ) ) ) lowercase_ : List[str] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] lowercase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(A ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(A ) ) def A ( self : Tuple , A : Dict ) -> int: lowercase_ : List[str] = '''lower newer''' lowercase_ : List[str] = '''lower newer''' return input_text, output_text def A ( self : Any ) -> str: lowercase_ : Dict = BioGptTokenizer(self.vocab_file , self.merges_file ) lowercase_ : List[Any] = '''lower''' lowercase_ : Dict = ['''low''', '''er</w>'''] lowercase_ : Any = tokenizer.tokenize(A ) self.assertListEqual(A , A ) lowercase_ : List[Any] = tokens + ['''<unk>'''] lowercase_ : Dict = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) @slow def A ( self : int ) -> List[str]: lowercase_ : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) lowercase_ : int = tokenizer.encode('''sequence builders''' , add_special_tokens=A ) lowercase_ : Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A ) lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(A ) lowercase_ : Tuple = tokenizer.build_inputs_with_special_tokens(A , A ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
33
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
272
0
from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
34
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
272
0
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging __a = logging.get_logger(__name__) logging.set_verbosity_info() def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Any: if "xprophetnet" in prophetnet_checkpoint_path: snake_case__ : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase ) snake_case__ , snake_case__ : Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained( _lowerCAmelCase , output_loading_info=_lowerCAmelCase ) else: snake_case__ : str = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase ) snake_case__ , snake_case__ : Tuple = ProphetNetForConditionalGeneration.from_pretrained( _lowerCAmelCase , output_loading_info=_lowerCAmelCase ) snake_case__ : List[Any] = ["""key_proj""", """value_proj""", """query_proj"""] snake_case__ : Optional[Any] = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: snake_case__ : Optional[Any] = key.split(""".""" ) if attributes[0] == "lm_head": snake_case__ : List[str] = prophet snake_case__ : Optional[int] = prophet_old else: snake_case__ : int = prophet.prophetnet snake_case__ : Tuple = prophet_old.model snake_case__ : Optional[Any] = False for attribute in attributes: if attribute in mapping: snake_case__ : List[str] = mapping[attribute] if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) > 0: snake_case__ : int = attribute elif hasattr(_lowerCAmelCase , _lowerCAmelCase ): snake_case__ : List[str] = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" snake_case__ : Dict = old_model.weight logger.info(f"{attribute} is initialized." ) snake_case__ : str = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
snake_case__ : Optional[int] = old_model.bias logger.info(f"{attribute} is initialized" ) snake_case__ : str = True break elif attribute in special_keys and hasattr(_lowerCAmelCase , """in_proj_weight""" ): snake_case__ : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3 snake_case__ : int = getattr(_lowerCAmelCase , _lowerCAmelCase ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": snake_case__ : int = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) snake_case__ : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": snake_case__ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) snake_case__ : List[str] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": snake_case__ : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) snake_case__ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) snake_case__ : Optional[Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." snake_case__ : Any = nn.Parameter(old_model.embed_positions.weight[:512, :] ) snake_case__ : List[str] = True break if attribute.isdigit(): snake_case__ : Optional[int] = model[int(_lowerCAmelCase )] snake_case__ : Dict = old_model[int(_lowerCAmelCase )] else: snake_case__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ) if old_attribute == "": snake_case__ : List[Any] = old_model else: if not hasattr(_lowerCAmelCase , _lowerCAmelCase ): raise ValueError(f"{old_model} does not have {old_attribute}" ) snake_case__ : str = getattr(_lowerCAmelCase , _lowerCAmelCase ) if not is_key_init: raise ValueError(f"{key} was not correctly initialized!" ) print(f"Saving model to {pytorch_dump_folder_path}" ) prophet.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __a = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
35
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std def a_ ( self): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None def a_ ( self): """simple docstring""" lowerCAmelCase = DPTImageProcessingTester(self) @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""")) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""")) self.assertTrue(hasattr(__lowerCAmelCase , """size""")) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18}) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42}) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" 
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
272
0
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
36
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str: '''simple docstring''' lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T return jnp.matmul(_A , norm_emb_a.T ) class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : CLIPConfig UpperCAmelCase_ : jnp.dtype = jnp.floataa def a_ ( self): """simple docstring""" lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config) lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype) lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim)) lowerCAmelCase = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,)) lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1] lowerCAmelCase = self.visual_projection(__lowerCAmelCase) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowerCAmelCase = 0.0 lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase) # Use a lower threshold if an image has any special care concept lowerCAmelCase = is_special_care * 0.01 lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = CLIPConfig UpperCAmelCase_ : Any = '''clip_input''' UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" if input_shape is None: lowerCAmelCase = (1, 224, 224, 3) lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase) super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase) lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""] return random_params def __call__( self 
, __lowerCAmelCase , __lowerCAmelCase = None , ): """simple docstring""" lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
272
0
def palindromic_string(input_string: str) -> str:
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
37
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = MvpTokenizer UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = filter_roberta_detectors def a_ ( self): """simple docstring""" super().setUp() lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase = {"""unk_token""": """<unk>"""} lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(__lowerCAmelCase)) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase): """simple docstring""" return "lower newer", "lower newer" @cached_property def a_ ( self): """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""") @cached_property def a_ ( self): """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""") @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) # Test that special tokens are reset @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""") # check if input_ids are returned and no labels self.assertIn("""input_ids""" , __lowerCAmelCase) self.assertIn("""attention_mask""" , __lowerCAmelCase) 
self.assertNotIn("""labels""" , __lowerCAmelCase) self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""") self.assertEqual(32 , targets["""input_ids"""].shape[1]) @require_torch def a_ ( self): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual(batch.input_ids.shape , (2, 1024)) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization."""] lowerCAmelCase = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""") lowerCAmelCase = inputs["""input_ids"""] lowerCAmelCase = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = """A, <mask> AllenNLP sentence.""" lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
272
0
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
38
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a__( enum.Enum ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 2 @add_end_docstrings(lowerCAmelCase__ ) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCAmelCase = None if self.model.config.prefix is not None: lowerCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params) lowerCAmelCase = {**self._preprocess_params, **preprocess_params} lowerCAmelCase = {**self._forward_params, **forward_params} def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = {} if prefix is not None: lowerCAmelCase = prefix if prefix: lowerCAmelCase = self.tokenizer( __lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" """ [None, 'hole']""") lowerCAmelCase = handle_long_generation preprocess_params.update(__lowerCAmelCase) lowerCAmelCase = generate_kwargs lowerCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""") if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.TENSORS if return_type is not None: lowerCAmelCase = return_type if clean_up_tokenization_spaces is not None: lowerCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) if len(__lowerCAmelCase) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""") lowerCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True}) return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase) def __call__( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.tokenizer( prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prompt_text if handle_long_generation == "hole": lowerCAmelCase = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCAmelCase = generate_kwargs["""max_new_tokens"""] else: lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""") if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""") lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:] return inputs def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = model_inputs["""input_ids"""] lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 1 else: lowerCAmelCase = input_ids.shape[0] lowerCAmelCase = model_inputs.pop("""prompt_text""") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0) if prefix_length > 0: lowerCAmelCase = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCAmelCase = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = generated_sequence.shape[0] if self.framework == "pt": lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True): """simple docstring""" lowerCAmelCase = model_outputs["""generated_sequence"""][0] lowerCAmelCase = model_outputs["""input_ids"""] lowerCAmelCase = model_outputs["""prompt_text"""] lowerCAmelCase = generated_sequence.numpy().tolist() lowerCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCAmelCase = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCAmelCase = self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCAmelCase = 0 else: lowerCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: lowerCAmelCase = prompt_text + text[prompt_length:] else: lowerCAmelCase = text[prompt_length:] lowerCAmelCase = {"""generated_text""": all_text} records.append(__lowerCAmelCase) return records
272
0
def infix_2_postfix(infix: str) -> str:
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
39
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
272
0
"""simple docstring""" from __future__ import annotations def lowercase ( A_ )-> None: '''simple docstring''' create_state_space_tree(A_ , [] , 0 , [0 for i in range(len(A_ ) )] ) def lowercase ( A_ , A_ , A_ , A_ , )-> None: '''simple docstring''' if index == len(A_ ): print(A_ ) return for i in range(len(A_ ) ): if not index_used[i]: current_sequence.append(sequence[i] ) a : Dict = True create_state_space_tree(A_ , A_ , index + 1 , A_ ) current_sequence.pop() a : int = False __lowercase = [3, 1, 2, 4] generate_all_permutations(sequence) __lowercase = ["A", "B", "C"] generate_all_permutations(sequence_a)
40
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : str = '''EncodecFeatureExtractor''' UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True): """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase) def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""") if text is not None: lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase) if audio is not None: lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase) if audio is None: return inputs elif text is None: return audio_inputs else: lowerCAmelCase = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: lowerCAmelCase = audio_inputs["""padding_mask"""] return inputs def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase) else: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = to_numpy(__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase) lowerCAmelCase = to_numpy(__lowerCAmelCase) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) lowerCAmelCase = seq_len - padding_mask.shape[-1] lowerCAmelCase = 1 - self.feature_extractor.padding_value lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase) lowerCAmelCase = audio_values.tolist() for i in range(__lowerCAmelCase): lowerCAmelCase = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1) return audio_values
272
0
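The first code field in the row above generates permutations by backtracking through a state-space tree. As a quick illustration, here is a minimal self-contained sketch of the same idea — names are illustrative, not taken from the dataset — that collects the permutations instead of printing them and cross-checks the result against itertools:

from itertools import permutations


def collect_permutations(sequence):
    results = []

    def backtrack(current, used):
        # A full-length branch is one complete permutation.
        if len(current) == len(sequence):
            results.append(tuple(current))
            return
        for i in range(len(sequence)):
            if not used[i]:
                used[i] = True
                current.append(sequence[i])
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))
    return results


assert sorted(collect_permutations([3, 1, 2])) == sorted(permutations([3, 1, 2]))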
import pytest

import datasets

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
41
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a__( unittest.TestCase ): '''simple docstring''' @property def a_ ( self): """simple docstring""" torch.manual_seed(0) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def a_ ( self): """simple docstring""" lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = """google/ddpm-cifar10-32""" lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase) lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
272
0
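The conftest in the row above isolates library caches per test session through autouse monkeypatch fixtures. A minimal runnable sketch of that pattern, using the standard library's tempfile module as the patch target (the fixture and test names are illustrative):

import tempfile

import pytest


@pytest.fixture(autouse=True)
def isolate_tempdir(tmp_path, monkeypatch):
    # Every test sees a private temp directory, mirroring the cache isolation above.
    monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))


def test_uses_isolated_tempdir(tmp_path):
    assert tempfile.gettempdir() == str(tmp_path)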
"""Project Euler 77: first value writable as a sum of primes in over 5000 ways."""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return the products of each prime multiset summing to `number_to_partition`;
    by unique factorization, len(partition(n)) counts the prime partitions of n."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5_000) -> int | None:
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
42
"""Capitalize the first letter of a sentence or word."""
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Return `sentence` with its first character upper-cased, rest unchanged."""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
272
0
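Under the reading above, partition(n) returns the set of prime products for the prime partitions of n, so len(partition(n)) counts them. A worked check, assuming the cleaned-up names partition/solution from the first field of this row (the counts match Project Euler 77's example that ten has exactly five prime partitions):

# 10 = 7+3 = 5+5 = 5+3+2 = 3+3+2+2 = 2+2+2+2+2 -> five distinct prime products
assert len(partition(10)) == 5
assert solution(4) == 10  # 10 is the first value with more than four prime partitions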
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
'''simple docstring''' import os import string import sys __lowercase = 1 << 8 __lowercase = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 2_7, '''up''': 6_5 + ARROW_KEY_FLAG, '''down''': 6_6 + ARROW_KEY_FLAG, '''right''': 6_7 + ARROW_KEY_FLAG, '''left''': 6_8 + ARROW_KEY_FLAG, '''mod_int''': 9_1, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 5_0, '''delete''': 5_1, '''pg_up''': 5_3, '''pg_down''': 5_4, } __lowercase = KEYMAP['''up'''] __lowercase = KEYMAP['''left'''] if sys.platform == "win32": __lowercase = [] __lowercase = { B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(1_0): __lowercase = ord(str(i)) def snake_case__ ( ) -> List[Any]: '''simple docstring''' if os.name == "nt": import msvcrt lowerCAmelCase = """mbcs""" # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_A ) == 0: # Read the keystroke lowerCAmelCase = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowerCAmelCase = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowerCAmelCase = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) ) WIN_CH_BUFFER.append(_A ) if ord(_A ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) lowerCAmelCase = chr(KEYMAP["""esc"""] ) except KeyError: lowerCAmelCase = cha[1] else: lowerCAmelCase = ch.decode(_A ) else: lowerCAmelCase = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowerCAmelCase = sys.stdin.fileno() lowerCAmelCase = termios.tcgetattr(_A ) try: tty.setraw(_A ) lowerCAmelCase = sys.stdin.read(1 ) finally: termios.tcsetattr(_A , termios.TCSADRAIN , _A ) return ch def snake_case__ ( ) -> Tuple: '''simple docstring''' lowerCAmelCase = get_raw_chars() if ord(_A ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_A ) == KEYMAP["esc"]: lowerCAmelCase = get_raw_chars() if ord(_A ) == KEYMAP["mod_int"]: lowerCAmelCase = get_raw_chars() if ord(_A ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_A ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_A ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
272
0
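A quick numeric check of the resonant-frequency formula in the row above: with L = 10 mH and C = 100 nF, f = 1 / (2π√(LC)) ≈ 5.03 kHz.

from math import isclose, pi, sqrt

inductance, capacitance = 10e-3, 100e-9  # 10 mH, 100 nF
frequency = 1 / (2 * pi * sqrt(inductance * capacitance))
assert isclose(frequency, 5032.9, rel_tol=1e-4)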
"""simple docstring""" from manim import * class __A ( SCREAMING_SNAKE_CASE_ ): def __A ( self ): _lowerCAmelCase : str = Rectangle(height=0.5 , width=0.5 ) _lowerCAmelCase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _lowerCAmelCase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 ) _lowerCAmelCase : Optional[int] = [mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _lowerCAmelCase : List[str] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Dict = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = Text("""CPU""" , font_size=24 ) _lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(a__ ) _lowerCAmelCase : Union[str, Any] = [mem.copy() for i in range(4 )] _lowerCAmelCase : str = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : str = Text("""GPU""" , font_size=24 ) _lowerCAmelCase : str = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) gpu.move_to([-1, -1, 0] ) self.add(a__ ) _lowerCAmelCase : Optional[Any] = [mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : int = Text("""Model""" , font_size=24 ) _lowerCAmelCase : int = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) model.move_to([3, -1.0, 0] ) self.add(a__ ) _lowerCAmelCase : Optional[Any] = [] _lowerCAmelCase : str = [] for i, rect in enumerate(a__ ): _lowerCAmelCase : int = fill.copy().set_fill(a__ , opacity=0.8 ) target.move_to(a__ ) model_arr.append(a__ ) _lowerCAmelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(a__ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(a__ ) self.add(*a__ , *a__ ) _lowerCAmelCase : str = [meta_mem.copy() for i in range(6 )] _lowerCAmelCase : Dict = [meta_mem.copy() for i in range(6 )] _lowerCAmelCase : Optional[int] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Optional[Any] = VGroup(*a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : int = VGroup(a__ , a__ ).arrange(a__ , buff=0 ) _lowerCAmelCase : Dict = Text("""Disk""" , font_size=24 ) _lowerCAmelCase : Optional[int] = Group(a__ , a__ ).arrange(a__ , buff=0.5 , aligned_edge=a__ ) disk.move_to([-4, -1.2_5, 0] ) self.add(a__ , a__ ) _lowerCAmelCase : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _lowerCAmelCase : Dict = MarkupText( F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(a__ , a__ ) _lowerCAmelCase : List[str] = MarkupText( F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(a__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(a__ ) _lowerCAmelCase : List[str] = MarkupText( F"Now watch as an input is passed through the model\nand how the memory is utilized and handled." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ ) ) _lowerCAmelCase : Union[str, Any] = Square(0.3 ) input.set_fill(a__ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , a__ , buff=0.5 ) self.play(Write(a__ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=a__ , buff=0.0_2 ) self.play(MoveToTarget(a__ ) ) self.play(FadeOut(a__ ) ) _lowerCAmelCase : Dict = Arrow(start=a__ , end=a__ , color=a__ , buff=0.5 ) a.next_to(model_arr[0].get_left() , a__ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _lowerCAmelCase : Optional[Any] = MarkupText( F"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ , run_time=3 ) ) _lowerCAmelCase : Optional[int] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2} self.play( Write(a__ ) , Circumscribe(model_arr[0] , color=a__ , **a__ ) , Circumscribe(model_cpu_arr[0] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _lowerCAmelCase : Any = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , a__ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) _lowerCAmelCase : Union[str, Any] = AnimationGroup( FadeOut(a__ , run_time=0.5 ) , MoveToTarget(a__ , run_time=0.5 ) , FadeIn(a__ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(a__ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _lowerCAmelCase : int = 0.7 self.play( Circumscribe(model_arr[i] , **a__ ) , Circumscribe(cpu_left_col_base[i] , **a__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , Circumscribe(model_arr[i + 1] , color=a__ , **a__ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=a__ , **a__ ) , Circumscribe(cpu_left_col_base[-1] , color=a__ , **a__ ) , Circumscribe(gpu_rect[0] , color=a__ , **a__ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _lowerCAmelCase : Any = a_c _lowerCAmelCase : Any = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(a__ ) , FadeOut(a__ , run_time=0.5 ) , ) _lowerCAmelCase : List[str] = MarkupText(F"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(a__ , run_time=3 ) , MoveToTarget(a__ ) ) self.wait()
44
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __lowercase = logging.get_logger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = ['''input_features'''] def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ): """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) lowerCAmelCase = n_fft lowerCAmelCase = hop_length lowerCAmelCase = chunk_length lowerCAmelCase = chunk_length * sampling_rate lowerCAmelCase = self.n_samples // hop_length lowerCAmelCase = sampling_rate lowerCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ) def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = spectrogram( __lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , ) lowerCAmelCase = log_spec[:, :-1] lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0) lowerCAmelCase = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0): """simple docstring""" if attention_mask is not None: lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa) lowerCAmelCase = [] for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)): lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7) if length < normed_slice.shape[0]: lowerCAmelCase = padding_value normed_input_values.append(__lowerCAmelCase) else: lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values] return normed_input_values def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""") lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") lowerCAmelCase = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray): lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa) elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): lowerCAmelCase = raw_speech.astype(np.floataa) # always return batch if not is_batched: lowerCAmelCase = [np.asarray([raw_speech]).T] lowerCAmelCase = BatchFeature({"""input_features""": raw_speech}) # convert into correct format for padding lowerCAmelCase = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowerCAmelCase = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , ) lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0) # make sure list is in array format lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1) lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]] if isinstance(input_features[0] , __lowerCAmelCase): lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features] else: lowerCAmelCase = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length] if return_tensors is not None: lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase) return padded_inputs def a_ ( self): """simple docstring""" lowerCAmelCase = copy.deepcopy(self.__dict__) lowerCAmelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
272
0
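The feature extractor in the row above normalizes each example to zero mean and unit variance, computing statistics only over the non-padded samples and resetting the padded tail. A standalone numpy sketch of that step (the 1e-7 floor mirrors the epsilon used in the row; names are illustrative):

import numpy as np


def zero_mean_unit_var(x: np.ndarray, length: int, padding_value: float = 0.0) -> np.ndarray:
    # Statistics come only from the first `length` (non-padded) samples.
    normed = (x - x[:length].mean()) / np.sqrt(x[:length].var() + 1e-7)
    normed[length:] = padding_value  # reset the padded tail
    return normed


signal = np.concatenate([np.random.randn(480), np.zeros(20)])  # 20 padded samples
normed = zero_mean_unit_var(signal, length=480)
assert abs(normed[:480].mean()) < 1e-6 and abs(normed[:480].std() - 1.0) < 1e-3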
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowercase_ = { "Acehnese Arabic": "ace_Arab", "Acehnese Latin": "ace_Latn", "Mesopotamian Arabic": "acm_Arab", "Ta'izzi-Adeni Arabic": "acq_Arab", "Tunisian Arabic": "aeb_Arab", "Afrikaans": "afr_Latn", "South Levantine Arabic": "ajp_Arab", "Akan": "aka_Latn", "Amharic": "amh_Ethi", "North Levantine Arabic": "apc_Arab", "Modern Standard Arabic": "arb_Arab", "Modern Standard Arabic Romanized": "arb_Latn", "Najdi Arabic": "ars_Arab", "Moroccan Arabic": "ary_Arab", "Egyptian Arabic": "arz_Arab", "Assamese": "asm_Beng", "Asturian": "ast_Latn", "Awadhi": "awa_Deva", "Central Aymara": "ayr_Latn", "South Azerbaijani": "azb_Arab", "North Azerbaijani": "azj_Latn", "Bashkir": "bak_Cyrl", "Bambara": "bam_Latn", "Balinese": "ban_Latn", "Belarusian": "bel_Cyrl", "Bemba": "bem_Latn", "Bengali": "ben_Beng", "Bhojpuri": "bho_Deva", "Banjar Arabic": "bjn_Arab", "Banjar Latin": "bjn_Latn", "Standard Tibetan": "bod_Tibt", "Bosnian": "bos_Latn", "Buginese": "bug_Latn", "Bulgarian": "bul_Cyrl", "Catalan": "cat_Latn", "Cebuano": "ceb_Latn", "Czech": "ces_Latn", "Chokwe": "cjk_Latn", "Central Kurdish": "ckb_Arab", "Crimean Tatar": "crh_Latn", "Welsh": "cym_Latn", "Danish": "dan_Latn", "German": "deu_Latn", "Southwestern Dinka": "dik_Latn", "Dyula": "dyu_Latn", "Dzongkha": "dzo_Tibt", "Greek": "ell_Grek", "English": "eng_Latn", "Esperanto": "epo_Latn", "Estonian": "est_Latn", "Basque": "eus_Latn", "Ewe": "ewe_Latn", "Faroese": "fao_Latn", "Fijian": "fij_Latn", "Finnish": "fin_Latn", "Fon": "fon_Latn", "French": "fra_Latn", "Friulian": "fur_Latn", "Nigerian Fulfulde": "fuv_Latn", "Scottish Gaelic": "gla_Latn", "Irish": "gle_Latn", "Galician": "glg_Latn", "Guarani": "grn_Latn", "Gujarati": "guj_Gujr", "Haitian Creole": "hat_Latn", "Hausa": "hau_Latn", "Hebrew": "heb_Hebr", "Hindi": "hin_Deva", "Chhattisgarhi": "hne_Deva", "Croatian": "hrv_Latn", "Hungarian": "hun_Latn", "Armenian": "hye_Armn", "Igbo": "ibo_Latn", "Ilocano": "ilo_Latn", "Indonesian": "ind_Latn", "Icelandic": "isl_Latn", "Italian": "ita_Latn", "Javanese": "jav_Latn", "Japanese": "jpn_Jpan", "Kabyle": "kab_Latn", "Jingpho": "kac_Latn", "Kamba": "kam_Latn", "Kannada": "kan_Knda", "Kashmiri Arabic": "kas_Arab", "Kashmiri Devanagari": "kas_Deva", "Georgian": "kat_Geor", "Central Kanuri Arabic": "knc_Arab", "Central Kanuri Latin": "knc_Latn", "Kazakh": "kaz_Cyrl", "Kabiyè": "kbp_Latn", "Kabuverdianu": "kea_Latn", "Khmer": "khm_Khmr", "Kikuyu": "kik_Latn", "Kinyarwanda": "kin_Latn", "Kyrgyz": "kir_Cyrl", "Kimbundu": "kmb_Latn", "Northern Kurdish": "kmr_Latn", "Kikongo": "kon_Latn", "Korean": "kor_Hang", "Lao": "lao_Laoo", "Ligurian": "lij_Latn", "Limburgish": "lim_Latn", "Lingala": "lin_Latn", "Lithuanian": "lit_Latn", "Lombard": "lmo_Latn", "Latgalian": "ltg_Latn", "Luxembourgish": "ltz_Latn", "Luba-Kasai": "lua_Latn", "Ganda": "lug_Latn", 
"Luo": "luo_Latn", "Mizo": "lus_Latn", "Standard Latvian": "lvs_Latn", "Magahi": "mag_Deva", "Maithili": "mai_Deva", "Malayalam": "mal_Mlym", "Marathi": "mar_Deva", "Minangkabau Arabic ": "min_Arab", "Minangkabau Latin": "min_Latn", "Macedonian": "mkd_Cyrl", "Plateau Malagasy": "plt_Latn", "Maltese": "mlt_Latn", "Meitei Bengali": "mni_Beng", "Halh Mongolian": "khk_Cyrl", "Mossi": "mos_Latn", "Maori": "mri_Latn", "Burmese": "mya_Mymr", "Dutch": "nld_Latn", "Norwegian Nynorsk": "nno_Latn", "Norwegian Bokmål": "nob_Latn", "Nepali": "npi_Deva", "Northern Sotho": "nso_Latn", "Nuer": "nus_Latn", "Nyanja": "nya_Latn", "Occitan": "oci_Latn", "West Central Oromo": "gaz_Latn", "Odia": "ory_Orya", "Pangasinan": "pag_Latn", "Eastern Panjabi": "pan_Guru", "Papiamento": "pap_Latn", "Western Persian": "pes_Arab", "Polish": "pol_Latn", "Portuguese": "por_Latn", "Dari": "prs_Arab", "Southern Pashto": "pbt_Arab", "Ayacucho Quechua": "quy_Latn", "Romanian": "ron_Latn", "Rundi": "run_Latn", "Russian": "rus_Cyrl", "Sango": "sag_Latn", "Sanskrit": "san_Deva", "Santali": "sat_Olck", "Sicilian": "scn_Latn", "Shan": "shn_Mymr", "Sinhala": "sin_Sinh", "Slovak": "slk_Latn", "Slovenian": "slv_Latn", "Samoan": "smo_Latn", "Shona": "sna_Latn", "Sindhi": "snd_Arab", "Somali": "som_Latn", "Southern Sotho": "sot_Latn", "Spanish": "spa_Latn", "Tosk Albanian": "als_Latn", "Sardinian": "srd_Latn", "Serbian": "srp_Cyrl", "Swati": "ssw_Latn", "Sundanese": "sun_Latn", "Swedish": "swe_Latn", "Swahili": "swh_Latn", "Silesian": "szl_Latn", "Tamil": "tam_Taml", "Tatar": "tat_Cyrl", "Telugu": "tel_Telu", "Tajik": "tgk_Cyrl", "Tagalog": "tgl_Latn", "Thai": "tha_Thai", "Tigrinya": "tir_Ethi", "Tamasheq Latin": "taq_Latn", "Tamasheq Tifinagh": "taq_Tfng", "Tok Pisin": "tpi_Latn", "Tswana": "tsn_Latn", "Tsonga": "tso_Latn", "Turkmen": "tuk_Latn", "Tumbuka": "tum_Latn", "Turkish": "tur_Latn", "Twi": "twi_Latn", "Central Atlas Tamazight": "tzm_Tfng", "Uyghur": "uig_Arab", "Ukrainian": "ukr_Cyrl", "Umbundu": "umb_Latn", "Urdu": "urd_Arab", "Northern Uzbek": "uzn_Latn", "Venetian": "vec_Latn", "Vietnamese": "vie_Latn", "Waray": "war_Latn", "Wolof": "wol_Latn", "Xhosa": "xho_Latn", "Eastern Yiddish": "ydd_Hebr", "Yoruba": "yor_Latn", "Yue Chinese": "yue_Hant", "Chinese Simplified": "zho_Hans", "Chinese Traditional": "zho_Hant", "Standard Malay": "zsm_Latn", "Zulu": "zul_Latn", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : List[str] = 'facebook/nllb-200-distilled-600M' __UpperCAmelCase : int = ( 'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' 
) __UpperCAmelCase : Tuple = 'translator' __UpperCAmelCase : int = AutoTokenizer __UpperCAmelCase : Dict = AutoModelForSeqaSeqLM __UpperCAmelCase : List[str] = LANGUAGE_CODES __UpperCAmelCase : Dict = ['text', 'text', 'text'] __UpperCAmelCase : Dict = ['text'] def __UpperCAmelCase ( self , _a , _a , _a ): if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''' ) __a = self.lang_to_code[src_lang] __a = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( _a , return_tensors='''pt''' , src_lang=_a , tgt_lang=_a ) def __UpperCAmelCase ( self , _a ): return self.model.generate(**_a ) def __UpperCAmelCase ( self , _a ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_a )
45
'''simple docstring''' from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __lowercase = logging.get_logger(__name__) __lowercase = '''T5Config''' class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''mt5''' UpperCAmelCase_ : Tuple = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[Any] = '''mt5''' UpperCAmelCase_ : int = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Tuple = '''mt5''' UpperCAmelCase_ : Union[str, Any] = MTaConfig
272
0
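The translation tool in the row above resolves plain-English language names to FLORES-200 codes such as eng_Latn before calling NLLB-200. A roughly equivalent high-level call through the transformers pipeline API — a sketch assuming network access to download the checkpoint:

from transformers import pipeline

translator = pipeline(
    "translation",
    model="facebook/nllb-200-distilled-600M",
    src_lang="eng_Latn",  # FLORES-200 code for English
    tgt_lang="fra_Latn",  # FLORES-200 code for French
)
print(translator("The weather is nice today.")[0]["translation_text"])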
"""simple docstring""" import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model", "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model", } } SCREAMING_SNAKE_CASE__ = { "AI-Sweden/gpt-sw3-126m": 2_048, "AI-Sweden/gpt-sw3-350m": 2_048, "AI-Sweden/gpt-sw3-1.6b": 2_048, "AI-Sweden/gpt-sw3-6.7b": 2_048, "AI-Sweden/gpt-sw3-20b": 2_048, } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES _SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP _SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask'] def __init__( self , lowercase , lowercase=False , lowercase=False , lowercase=False , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase = None , **lowercase , ) -> None: lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs lowerCAmelCase = kwargs.get("""name_or_path""" ) if name_or_path is None: logger.warning( """name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,""" """ you are testing the model, this can safely be ignored""" ) lowerCAmelCase = """None""" # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing lowerCAmelCase = """<|endoftext|>""" if eos_token is None else eos_token lowerCAmelCase = """<unk>""" if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: lowerCAmelCase = unk_token if pad_token is None else pad_token lowerCAmelCase = eos_token if bos_token is None else bos_token else: lowerCAmelCase = """<pad>""" if pad_token is None else pad_token lowerCAmelCase = """<s>""" if bos_token is None else bos_token super().__init__( do_lower_case=lowercase , remove_space=lowercase , keep_accents=lowercase , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , pad_token=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , ) lowerCAmelCase = do_lower_case lowerCAmelCase = remove_space lowerCAmelCase = keep_accents lowerCAmelCase = vocab_file lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase ) # Used for whitespace normalization in input texts # fmt : off lowerCAmelCase = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """„"""} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing lowerCAmelCase = re.compile( f'[{"".join(map(lowercase , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]' ) def __getstate__( self ) -> Optional[int]: lowerCAmelCase = self.__dict__.copy() lowerCAmelCase = None return state def __setstate__( self , lowercase ) -> str: lowerCAmelCase = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowerCAmelCase = {} lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def _snake_case ( self ) -> int: return len(self.sp_model ) def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = self.non_printing_characters_re.sub("""""" , lowercase ) # Normalize whitespaces lowerCAmelCase = """""".join([char if char not in self.whitespaces else """ """ for char in text] ) # NFC Unicode normalization lowerCAmelCase = unicodedata.normalize("""NFC""" , lowercase ) return text def _snake_case ( self , lowercase , **lowercase ) -> List[str]: lowerCAmelCase = self.preprocess_text(lowercase ) return self.sp_model.encode(lowercase , out_type=lowercase ) def _snake_case ( self , lowercase ) -> int: return self.sp_model.PieceToId(lowercase ) def _snake_case ( self , lowercase ) -> str: return self.sp_model.IdToPiece(lowercase ) @staticmethod def _snake_case ( lowercase ) -> str: return out_string def _snake_case ( self , lowercase ) -> str: lowerCAmelCase = [] lowerCAmelCase = """""" lowerCAmelCase = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowercase ) + token lowerCAmelCase = True lowerCAmelCase = [] else: current_sub_tokens.append(lowercase ) lowerCAmelCase = False out_string += self.sp_model.decode(lowercase ) return out_string def _snake_case ( self ) -> Dict[str, int]: lowerCAmelCase = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self , lowercase , lowercase = None ) -> Tuple[str]: if not os.path.isdir(lowercase ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase ) elif not os.path.isfile(self.vocab_file ): with open(lowercase , """wb""" ) as fi: lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(lowercase ) return (out_vocab_file,) def _snake_case ( self , lowercase , lowercase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]: if isinstance(lowercase , lowercase ): lowerCAmelCase = self.preprocess_text(lowercase ) lowerCAmelCase = self.sp_model.encode(lowercase ) else: lowerCAmelCase = [self.preprocess_text(lowercase ) for t in text] lowerCAmelCase = self.sp_model.encode(lowercase ) if return_tensors is True or return_tensors == "pt": lowerCAmelCase = torch.tensor(lowercase ) return token_ids def _snake_case ( self , lowercase ) -> str: return 
self.sp_model.decode(lowercase ) def _snake_case ( self , lowercase ) -> List[int]: lowerCAmelCase = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] lowerCAmelCase = ( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(lowercase ) + f'{self.bos_token}Bot:' ) return self.encode(text=lowercase )
46
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __lowercase = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''ernie_m''' UpperCAmelCase_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self , __lowerCAmelCase = 250002 , __lowerCAmelCase = 768 , __lowerCAmelCase = 12 , __lowerCAmelCase = 12 , __lowerCAmelCase = 3072 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 514 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = 1 , __lowerCAmelCase = 1E-0_5 , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=0.0 , **__lowerCAmelCase , ): """simple docstring""" super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = num_hidden_layers lowerCAmelCase = num_attention_heads lowerCAmelCase = intermediate_size lowerCAmelCase = hidden_act lowerCAmelCase = hidden_dropout_prob lowerCAmelCase = attention_probs_dropout_prob lowerCAmelCase = max_position_embeddings lowerCAmelCase = initializer_range lowerCAmelCase = layer_norm_eps lowerCAmelCase = classifier_dropout lowerCAmelCase = is_decoder lowerCAmelCase = act_dropout
272
0
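The conversation hook at the end of the tokenizer in the row above builds a chat prompt by joining alternating User:/Bot: turns with the bos token. A pure-Python sketch of that layout, using the default special tokens from the same row (the helper name is illustrative):

def build_chat_prompt(turns, bos="<s>", eos="<|endoftext|>"):
    # turns: list of (is_user, text) pairs, oldest first
    texts = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in turns]
    return f"{eos}{bos}" + bos.join(texts) + f"{bos}Bot:"


prompt = build_chat_prompt([(True, "Hej!"), (False, "Hej på dig!")])
assert prompt == "<|endoftext|><s>User: Hej!<s>Bot: Hej på dig!<s>Bot:"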
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva lowerCamelCase : Tuple = "" lowerCamelCase : Union[str, Any] = "" lowerCamelCase : Dict = "" lowerCamelCase : str = 1 # (0 is vertical, 1 is horizontal) def _lowerCAmelCase ( ) -> None: """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_dataset(_UpperCamelCase , _UpperCamelCase ) print('Processing...' ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =update_image_and_anno(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for index, image in enumerate(_UpperCamelCase ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _SCREAMING_SNAKE_CASE =random_chars(32 ) _SCREAMING_SNAKE_CASE =paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] _SCREAMING_SNAKE_CASE =f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(f"/{file_root}.jpg" , _UpperCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"Success {index+1}/{len(_UpperCamelCase )} with {file_name}" ) _SCREAMING_SNAKE_CASE =[] for anno in new_annos[index]: _SCREAMING_SNAKE_CASE =f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(_UpperCamelCase ) with open(f"/{file_root}.txt" , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> tuple[list, list]: """simple docstring""" _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] for label_file in glob.glob(os.path.join(_UpperCamelCase , '*.txt' ) ): _SCREAMING_SNAKE_CASE =label_file.split(os.sep )[-1].rsplit('.' , 1 )[0] with open(_UpperCamelCase ) as in_file: _SCREAMING_SNAKE_CASE =in_file.readlines() _SCREAMING_SNAKE_CASE =os.path.join(_UpperCamelCase , f"{label_name}.jpg" ) _SCREAMING_SNAKE_CASE =[] for obj_list in obj_lists: _SCREAMING_SNAKE_CASE =obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_UpperCamelCase ) labels.append(_UpperCamelCase ) return img_paths, labels def _lowerCAmelCase ( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : int = 1 ) -> tuple[list, list, list]: """simple docstring""" _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] for idx in range(len(_UpperCamelCase ) ): _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =img_list[idx] path_list.append(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =anno_list[idx] _SCREAMING_SNAKE_CASE =cva.imread(_UpperCamelCase ) if flip_type == 1: _SCREAMING_SNAKE_CASE =cva.flip(_UpperCamelCase , _UpperCamelCase ) for bbox in img_annos: _SCREAMING_SNAKE_CASE =1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _SCREAMING_SNAKE_CASE =cva.flip(_UpperCamelCase , _UpperCamelCase ) for bbox in img_annos: _SCREAMING_SNAKE_CASE =1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_UpperCamelCase ) new_imgs_list.append(_UpperCamelCase ) return new_imgs_list, new_annos_lists, path_list def _lowerCAmelCase ( _UpperCamelCase : int = 32 ) -> str: """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _SCREAMING_SNAKE_CASE =ascii_lowercase + digits return "".join(random.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) if __name__ == "__main__": main() print("DONE ✅")
47
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __lowercase = logging.getLogger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Any = '''sequence-classification''' def __init__( self , __lowerCAmelCase): """simple docstring""" if type(__lowerCAmelCase) == dict: lowerCAmelCase = Namespace(**__lowerCAmelCase) lowerCAmelCase = glue_output_modes[hparams.task] lowerCAmelCase = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return self.model(**__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase = outputs[0] lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""] lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def a_ ( self): """simple docstring""" lowerCAmelCase = self.hparams lowerCAmelCase = processors[args.task]() lowerCAmelCase = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase = self._feature_file(__lowerCAmelCase) if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir) lowerCAmelCase = ( processor.get_dev_examples(args.data_dir) if mode == """dev""" else processor.get_train_examples(args.data_dir) ) lowerCAmelCase = convert_examples_to_features( __lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __lowerCAmelCase) torch.save(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False): """simple docstring""" lowerCAmelCase = """dev""" if mode == """test""" else mode lowerCAmelCase = self._feature_file(__lowerCAmelCase) logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) lowerCAmelCase = torch.load(__lowerCAmelCase) lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(__lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , ) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = outputs[:2] lowerCAmelCase = logits.detach().cpu().numpy() lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item() lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = np.squeeze(__lowerCAmelCase) lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0) lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)} lowerCAmelCase = dict(results.items()) lowerCAmelCase = results return ret, preds_list, out_label_list def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def a_ ( __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""") return parser def snake_case__ ( ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase = argparse.ArgumentParser() add_generic_args(_A , os.getcwd() ) lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() ) lowerCAmelCase = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase = os.path.join( """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) lowerCAmelCase = GLUETransformer(_A ) lowerCAmelCase = generic_train(_A , _A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) ) lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_A ) if __name__ == "__main__": main()
272
0
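The augmentation in the row above flips YOLO-format boxes by mirroring the relevant normalized centre coordinate: x_center' = 1 - x_center for a horizontal flip, y_center' = 1 - y_center for a vertical one. A minimal sketch (names are illustrative):

def flip_yolo_box(box, flip_type=1):
    # box = (class_id, x_center, y_center, width, height), coordinates normalized to [0, 1]
    class_id, x_center, y_center, width, height = box
    if flip_type == 1:  # horizontal flip mirrors the x centre
        return (class_id, 1 - x_center, y_center, width, height)
    if flip_type == 0:  # vertical flip mirrors the y centre
        return (class_id, x_center, 1 - y_center, width, height)
    return box


assert flip_yolo_box((0, 0.25, 0.4, 0.1, 0.2), flip_type=1) == (0, 0.75, 0.4, 0.1, 0.2)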
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real (active) power P = S * pf for apparent power S and power factor pf."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sqrt(1 - pf**2)."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
48
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor __lowercase = logging.get_logger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" warnings.warn( """The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use DeformableDetrImageProcessor instead.""" , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase)
272
0
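A numeric check of the power relations in the row above: at apparent power S = 100 VA and power factor 0.8, the real power is P = 80 W and the reactive power Q = 60 var, and S² = P² + Q² holds:

import math

apparent, pf = 100.0, 0.8
real = apparent * pf                        # P = S * pf
reactive = apparent * math.sqrt(1 - pf**2)  # Q = S * sqrt(1 - pf^2)
assert real == 80.0 and round(reactive, 9) == 60.0
assert math.isclose(real**2 + reactive**2, apparent**2)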
def triangle_number_generator():
    """Yield triangle numbers t_n = n * (n + 1) / 2."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count divisors of n via its prime factorization: d(n) = prod(e_i + 1)."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
49
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowercase = { '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
272
0
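The divisor count in the row above uses the prime-factorization identity d(n) = ∏(eᵢ + 1). For the classic Project Euler 12 example, 28 = 2² · 7 gives d = (2 + 1)(1 + 1) = 6, making 28 the first triangle number with more than five divisors:

# Assumes the cleaned-up count_divisors from the first field of this row.
assert count_divisors(28) == 6  # divisors: 1, 2, 4, 7, 14, 28
assert [count_divisors(t) for t in (1, 3, 6, 10, 15, 21, 28)] == [1, 2, 4, 4, 4, 4, 6]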
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = BertJapaneseTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def A_ ( self : Optional[int] ) -> Dict: super().setUp() lowerCamelCase__ : str = [ '[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは', '世界', '##世界', '、', '##、', '。', '##。', ] lowerCamelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : Tuple , UpperCAmelCase : Dict ) -> Optional[int]: lowerCamelCase__ : Tuple = 'こんにちは、世界。 \nこんばんは、世界。' lowerCamelCase__ : Union[str, Any] = 'こんにちは 、 世界 。 こんばんは 、 世界 。' return input_text, output_text def A_ ( self : List[str] , UpperCAmelCase : List[Any] ) -> str: lowerCamelCase__ , lowerCamelCase__ : str = self.get_input_output_texts(UpperCAmelCase ) lowerCamelCase__ : int = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) return text, ids def A_ ( self : Dict ) -> List[str]: pass # TODO add if relevant def A_ ( self : List[Any] ) -> Dict: pass # TODO add if relevant def A_ ( self : Any ) -> Dict: pass # TODO add if relevant def A_ ( self : Any ) -> Optional[int]: lowerCamelCase__ : Optional[Any] = self.tokenizer_class(self.vocab_file ) lowerCamelCase__ : str = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def A_ ( self : Any ) -> Tuple: lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Tuple = pickle.load(UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def A_ ( self : List[Any] ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = MecabTokenizer(mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : Union[str, Any] ) -> Any: try: lowerCamelCase__ : Optional[int] = 
MecabTokenizer(mecab_dic='unidic_lite' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : int ) -> Union[str, Any]: try: lowerCamelCase__ : Optional[Any] = MecabTokenizer(mecab_dic='unidic' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : str ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) def A_ ( self : int ) -> List[str]: try: lowerCamelCase__ : Optional[int] = MecabTokenizer( do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , ) def A_ ( self : Dict ) -> Tuple: lowerCamelCase__ : Any = MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic='ipadic' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , ) @require_sudachi def A_ ( self : List[Any] ) -> Optional[Any]: lowerCamelCase__ : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : List[Any] = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_sudachi def A_ ( self : Dict ) -> int: lowerCamelCase__ : Any = SudachiTokenizer(sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : Dict ) -> Optional[Any]: lowerCamelCase__ : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] ) @require_sudachi def A_ ( self : Any ) -> int: lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] ) @require_sudachi def A_ ( self : Any ) -> Union[str, Any]: lowerCamelCase__ : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' ) self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] ) @require_sudachi def A_ ( self : str ) -> 
Optional[int]: lowerCamelCase__ : List[str] = SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : Union[str, Any] ) -> Tuple: lowerCamelCase__ : int = SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , ) @require_sudachi def A_ ( self : List[Any] ) -> Tuple: lowerCamelCase__ : Union[str, Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='core' ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , ) @require_jumanpp def A_ ( self : str ) -> List[str]: lowerCamelCase__ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' ) self.assertIsNotNone(UpperCAmelCase ) lowerCamelCase__ : Any = 'こんにちは、世界。\nこんばんは、世界。' lowerCamelCase__ : Any = tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowerCamelCase__ : Any = os.path.join(self.tmpdirname , 'tokenizer.bin' ) with open(UpperCAmelCase , 'wb' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as handle: lowerCamelCase__ : Optional[int] = pickle.load(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_jumanpp def A_ ( self : Tuple ) -> Tuple: lowerCamelCase__ : List[str] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : str ) -> Optional[Any]: lowerCamelCase__ : List[str] = JumanppTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : Tuple ) -> List[str]: lowerCamelCase__ : int = JumanppTokenizer(normalize_text=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , ) @require_jumanpp def A_ ( self : str ) -> Dict: lowerCamelCase__ : Optional[Any] = JumanppTokenizer(trim_whitespace=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , ) @require_jumanpp def A_ ( self : Any ) -> Any: lowerCamelCase__ : Union[str, Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , ) def A_ ( self : List[Any] ) -> int: lowerCamelCase__ : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', 
'##こん', '##にちは', '##ばんは'] lowerCamelCase__ : int = {} for i, token in enumerate(UpperCAmelCase ): lowerCamelCase__ : List[Any] = i lowerCamelCase__ : int = WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] ) self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] ) def A_ ( self : Tuple ) -> str: lowerCamelCase__ : int = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' ) lowerCamelCase__ : List[str] = tokenizer.subword_tokenizer lowerCamelCase__ : Any = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' ) self.assertListEqual(UpperCAmelCase , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] ) lowerCamelCase__ : Optional[int] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' ) self.assertListEqual(UpperCAmelCase , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] ) def A_ ( self : Dict ) -> List[Any]: lowerCamelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' ) lowerCamelCase__ : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = BertJapaneseTokenizer UpperCAmelCase__ = False def A_ ( self : Dict ) -> Any: super().setUp() lowerCamelCase__ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。'] lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : List[Any] , **UpperCAmelCase : str ) -> List[str]: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **UpperCAmelCase ) def A_ ( self : List[str] , UpperCAmelCase : Union[str, Any] ) -> List[Any]: lowerCamelCase__ : str = 'こんにちは、世界。 \nこんばんは、世界。' lowerCamelCase__ : List[str] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。' return input_text, output_text def A_ ( self : Optional[Any] ) -> List[Any]: pass # TODO add if relevant def A_ ( self : Tuple ) -> Union[str, Any]: pass # TODO add if relevant def A_ ( self : Optional[Any] ) -> Optional[int]: pass # TODO add if relevant def A_ ( self : Tuple ) -> Tuple: lowerCamelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' ) lowerCamelCase__ : List[str] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' ) self.assertListEqual( UpperCAmelCase , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def A_ ( self : Dict ) -> Any: lowerCamelCase__ : Any = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', 
'、', '。'] lowerCamelCase__ : Optional[int] = {} for i, token in enumerate(UpperCAmelCase ): lowerCamelCase__ : Union[str, Any] = i lowerCamelCase__ : Optional[int] = CharacterTokenizer(vocab=UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] ) self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] ) def A_ ( self : Any ) -> str: lowerCamelCase__ : Dict = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' ) lowerCamelCase__ : List[Any] = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Union[str, Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase ) lowerCamelCase__ : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : List[str] ) -> Dict: lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese' lowerCamelCase__ : int = AutoTokenizer.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) class lowerCAmelCase ( unittest.TestCase ): def A_ ( self : Tuple ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = 'cl-tohoku/bert-base-japanese' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) ) lowerCamelCase__ : Optional[Any] = 'bert-base-cased' with self.assertLogs('transformers' , level='WARNING' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( 'The tokenizer class you load from this checkpoint is not the same type as the class this function' ' is called from.' ) )
50
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class a__( unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = (3, 32, 128) lowerCAmelCase = tempfile.mkdtemp() # fmt: off lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") lowerCAmelCase = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self): """simple docstring""" shutil.rmtree(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1)) return image_input def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") 
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0) lowerCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""") lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = processor(text=__lowerCAmelCase) lowerCAmelCase = tokenizer(__lowerCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase): processor() def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.char_decode(__lowerCAmelCase) lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase) lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = None lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = torch.randn(1 , 27 , 38) lowerCAmelCase = torch.randn(1 , 27 , 50257) lowerCAmelCase = torch.randn(1 , 27 , 30522) lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input]) 
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
272
0
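The WordpieceTokenizer tests in the record above exercise greedy longest-match-first subword splitting. Below is a minimal, self-contained sketch of that algorithm; the function name, the toy vocabulary, and the use of a plain set are my own illustration, not code from the record (the real transformers class also caps word length, which this sketch omits).

def wordpiece_tokenize(text: str, vocab: set, unk_token: str = "[UNK]", prefix: str = "##") -> list:
    # Greedy longest-match-first: at each position take the longest piece
    # present in the vocabulary; non-initial pieces carry the "##" prefix.
    output = []
    for word in text.split():
        start, pieces = 0, []
        while start < len(word):
            end, match = len(word), None
            while start < end:
                piece = word[start:end]
                if start > 0:
                    piece = prefix + piece
                if piece in vocab:
                    match = piece
                    break
                end -= 1
            if match is None:
                # no piece of the remainder is in the vocabulary:
                # the whole word becomes a single unknown token
                pieces = [unk_token]
                break
            pieces.append(match)
            start = end
        output.extend(pieces)
    return output

# mirrors the expectation tested above: "こんばんは" -> ["こん", "##ばんは"]
assert wordpiece_tokenize("こんばんは", {"こんにちは", "こん", "##ばんは"}) == ["こん", "##ばんは"]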
def A(word: str, max_width: int) -> list:
    """simple docstring"""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
51
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=True lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=False lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) @cached_property def a_ ( self): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""") def a_ ( self): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowerCAmelCase , f.name) lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase) lowerCAmelCase = pickle.dumps(__lowerCAmelCase) pickle.loads(__lowerCAmelCase) def a_ ( self): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase) lowerCAmelCase 
= rust_tokenizer.tokenize(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = """Hello World!""" lowerCAmelCase = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCAmelCase = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
272
0
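A quick usage check of the text-justification routine reconstructed at the top of this record (it keeps the name A from the dump); the sample sentence and width are mine.

# every line is padded to exactly max_width = 16 characters; extra spaces
# go to the leftmost gaps, and the final line is left-justified
for item in A("The quick brown fox jumps over the lazy dog", 16):
    print(repr(item))
# 'The  quick brown'
# 'fox  jumps  over'
# 'the lazy dog    '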
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=400 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 255 , A_=True , ): '''simple docstring''' UpperCamelCase : Dict = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} UpperCamelCase : List[str] = parent UpperCamelCase : Dict = batch_size UpperCamelCase : str = num_channels UpperCamelCase : Optional[Any] = min_resolution UpperCamelCase : Dict = max_resolution UpperCamelCase : int = do_resize UpperCamelCase : Any = size UpperCamelCase : Tuple = do_normalize UpperCamelCase : Optional[int] = image_mean UpperCamelCase : Union[str, Any] = image_std UpperCamelCase : Optional[int] = do_rescale UpperCamelCase : str = rescale_factor UpperCamelCase : int = do_pad def __UpperCamelCase( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCamelCase( self , A_ , A_=False ): '''simple docstring''' if not batched: UpperCamelCase : Optional[int] = image_inputs[0] if isinstance(A_ , Image.Image ): UpperCamelCase , UpperCamelCase : List[str] = image.size else: UpperCamelCase , UpperCamelCase : Tuple = image.shape[1], image.shape[2] if w < h: UpperCamelCase : Dict = int(self.size["shortest_edge"] * h / w ) UpperCamelCase : Dict = self.size["shortest_edge"] elif w > h: UpperCamelCase : Optional[int] = self.size["shortest_edge"] UpperCamelCase : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: UpperCamelCase : List[str] = self.size["shortest_edge"] UpperCamelCase : Optional[int] = self.size["shortest_edge"] else: UpperCamelCase : List[Any] = [] for image in image_inputs: UpperCamelCase , UpperCamelCase : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase : List[Any] = max(A_ , key=lambda A_ : item[0] )[0] UpperCamelCase : int = max(A_ , key=lambda A_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :int = DeformableDetrImageProcessor if is_vision_available() else None def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = DeformableDetrImageProcessingTester(self ) @property def __UpperCamelCase( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ , "image_mean" ) ) self.assertTrue(hasattr(A_ , "image_std" ) ) self.assertTrue(hasattr(A_ , "do_normalize" ) ) self.assertTrue(hasattr(A_ , "do_resize" ) ) self.assertTrue(hasattr(A_ , "do_rescale" ) ) self.assertTrue(hasattr(A_ , "do_pad" ) ) self.assertTrue(hasattr(A_ , "size" ) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase 
: str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} ) self.assertEqual(image_processor.do_pad , A_ ) UpperCamelCase : str = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , A_ ) def __UpperCamelCase( self ): '''simple docstring''' pass def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ , Image.Image ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase , UpperCamelCase : int = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) UpperCamelCase : Any = image_processing(A_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : int = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : Optional[int] = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ , torch.Tensor ) # Test not batched input UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(A_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase : Union[str, Any] = image_processing(A_ , return_tensors="pt" ).pixel_values UpperCamelCase , UpperCamelCase : Dict = 
self.image_processor_tester.get_expected_values(A_ , batched=A_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: UpperCamelCase : str = json.loads(f.read() ) UpperCamelCase : Dict = {"image_id": 3_9769, "annotations": target} # encode them UpperCamelCase : str = DeformableDetrImageProcessor() UpperCamelCase : Optional[Any] = image_processing(images=A_ , annotations=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : Optional[int] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : Union[str, Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : List[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : Optional[Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : str = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : str = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify orig_size UpperCamelCase : Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : Any = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) ) @slow def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: UpperCamelCase : Optional[int] = json.loads(f.read() ) UpperCamelCase : Any = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target} UpperCamelCase : Tuple = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them UpperCamelCase : List[Any] = DeformableDetrImageProcessor(format="coco_panoptic" ) UpperCamelCase : Dict = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors="pt" ) # verify pixel values UpperCamelCase : Any = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding["pixel_values"].shape , A_ ) UpperCamelCase : List[Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , A_ , atol=1e-4 ) ) # verify area UpperCamelCase : Union[str, Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , A_ ) ) # verify boxes UpperCamelCase : 
Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , A_ ) UpperCamelCase : Any = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , A_ , atol=1e-3 ) ) # verify image_id UpperCamelCase : Union[str, Any] = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , A_ ) ) # verify is_crowd UpperCamelCase : Dict = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , A_ ) ) # verify class_labels UpperCamelCase : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , A_ ) ) # verify masks UpperCamelCase : Dict = 82_2873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , A_ ) # verify orig_size UpperCamelCase : List[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , A_ ) ) # verify size UpperCamelCase : List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , A_ ) )
52
'''simple docstring'''


def gcd(a: int, b: int) -> int:
    '''simple docstring'''
    # Euclidean algorithm: repeatedly replace (a, b) with (b mod a, a).
    while a != 0:
        a, b = b % a, a
    return b


def snake_case__(a: int, m: int) -> int:
    '''simple docstring'''
    if gcd(a, m) != 1:
        lowerCAmelCase = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(lowerCAmelCase)
    # extended Euclidean algorithm: track Bezout coefficients alongside the gcd
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
272
0
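The modular-inverse routine closing the record above implements the extended Euclidean algorithm; a small sanity check with inputs chosen by me:

inv = snake_case__(8, 31)            # 8 * 4 = 32 ≡ 1 (mod 31)
assert inv == 4 and (8 * inv) % 31 == 1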
'''simple docstring''' import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) a__ : Optional[int] =[ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCamelCase ( self : int , __A : str , __A : bool , __A : str = None , __A : list = None ): __UpperCamelCase = None __UpperCamelCase = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) __UpperCamelCase = os.path.abspath('examples' ) for item in os.listdir(__A ): if item not in EXCLUDE_EXAMPLES: __UpperCamelCase = os.path.join(__A , __A ) if os.path.isfile(__A ) and ".py" in item_path: with self.subTest( tested_script=__A , feature_script=__A , tested_section='main()' if parser_only else 'training_function()' , ): __UpperCamelCase = compare_against_test( os.path.join(__A , __A ) , __A , __A , __A ) __UpperCamelCase = '\n'.join(__A ) if special_strings is not None: for string in special_strings: __UpperCamelCase = diff.replace(__A , '' ) self.assertEqual(__A , '' ) def _lowerCamelCase ( self : int ): self.one_complete_example('complete_nlp_example.py' , __A ) self.one_complete_example('complete_nlp_example.py' , __A ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) __UpperCamelCase = [ ' ' * 1_6 + '{\n\n', ' ' * 2_0 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 2_0 + '"f1": eval_metric["f1"],\n\n', ' ' * 2_0 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 2_0 + '"epoch": epoch,\n\n', ' ' * 1_6 + '},\n\n', ' ' * 1_6 + 'step=epoch,\n', ' ' * 1_2, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , __A , __A , __A ) self.one_complete_example('complete_cv_example.py' , __A , __A , __A ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class snake_case ( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] =False @classmethod def _lowerCamelCase ( cls : Any ): super().setUpClass() __UpperCamelCase = tempfile.mkdtemp() __UpperCamelCase = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __UpperCamelCase = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def _lowerCamelCase ( cls : Any ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def _lowerCamelCase ( self : Any ): __UpperCamelCase = f''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def _lowerCamelCase ( self : str ): __UpperCamelCase = f''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() __UpperCamelCase = 
run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def _lowerCamelCase ( self : List[str] ): __UpperCamelCase = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} '''.split() __UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=__A ) self.assertNotIn('epoch 0:' , __A ) self.assertIn('epoch 1:' , __A ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} '''.split() __UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=__A ) if torch.cuda.is_available(): __UpperCamelCase = torch.cuda.device_count() else: __UpperCamelCase = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , __A ) self.assertIn('epoch 1:' , __A ) else: self.assertIn('epoch 0:' , __A ) self.assertIn('epoch 1:' , __A ) @slow def _lowerCamelCase ( self : Tuple ): __UpperCamelCase = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): __UpperCamelCase = run_command(self._launch_args + testargs , return_stdout=__A ) __UpperCamelCase = re.findall('({.+})' , __A ) __UpperCamelCase = [r for r in results if 'accuracy' in r][-1] __UpperCamelCase = ast.literal_eval(__A ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def _lowerCamelCase ( self : Dict ): __UpperCamelCase = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def _lowerCamelCase ( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdir: __UpperCamelCase = f''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(__A , 'tracking' ) ) ) def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def _lowerCamelCase ( self : List[Any] ): __UpperCamelCase = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
53
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def snake_case__ ( _A: jnp.ndarray , _A: int , _A: float = 1 , _A: float = 1 , _A: float = 1.0e4 , _A: bool = False , _A: float = 1.0 , ) -> jnp.ndarray: '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even" lowerCAmelCase = float(embedding_dim // 2 ) lowerCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) lowerCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment ) lowerCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 ) # scale embeddings lowerCAmelCase = scale * emb if flip_sin_to_cos: lowerCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 ) else: lowerCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 ) lowerCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] ) return signal class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : jnp.dtype = jnp.floataa @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""")(__lowerCAmelCase) lowerCAmelCase = nn.silu(__lowerCAmelCase) lowerCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""")(__lowerCAmelCase) return temb class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : int = 3_2 UpperCAmelCase_ : bool = False UpperCAmelCase_ : float = 1 @nn.compact def __call__( self , __lowerCAmelCase): """simple docstring""" return get_sinusoidal_embeddings( __lowerCAmelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
272
0
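The Flax module above computes standard sinusoidal timestep embeddings. As a framework-free cross-check, here is the same computation in NumPy, assuming the defaults in the record (freq_shift = 1 and sin-before-cos ordering, i.e. flip_sin_to_cos=False):

import numpy as np

def sinusoidal_embeddings(timesteps: np.ndarray, embedding_dim: int,
                          min_timescale: float = 1.0, max_timescale: float = 1.0e4,
                          freq_shift: float = 1.0) -> np.ndarray:
    # half the channels carry sin, half carry cos, with geometrically
    # spaced frequencies between 1/min_timescale and 1/max_timescale
    num_timescales = embedding_dim // 2
    log_timescale_increment = np.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_timescale_increment)
    emb = timesteps[:, None].astype(np.float32) * inv_timescales[None, :]
    return np.concatenate([np.sin(emb), np.cos(emb)], axis=1)

print(sinusoidal_embeddings(np.array([0, 1, 10]), 32).shape)   # (3, 32)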
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase_ ( metaclass=UpperCamelCase): """simple docstring""" snake_case__ : Optional[Any] = ["keras_nlp"] def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]: requires_backends(self , ["keras_nlp"] )
54
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
272
0
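The keras_nlp stub above follows transformers' dummy-object pattern: importing the package always succeeds, but touching a class whose backend is missing fails loudly. A minimal independent sketch of the idea follows; the metaclass, class name, and message are my own, not the library's actual DummyObject/requires_backends machinery.

class RequiresBackendMeta(type):
    # instantiation raises immediately with an actionable message,
    # instead of a confusing AttributeError much later
    def __call__(cls, *args, **kwargs):
        raise ImportError(
            f"{cls.__name__} requires the {cls.required_backend!r} backend; "
            f"install it with: pip install {cls.required_backend}"
        )

class KerasNLPTokenizerStub(metaclass=RequiresBackendMeta):
    required_backend = "keras_nlp"

# KerasNLPTokenizerStub() would raise ImportError here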
'''simple docstring''' from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class snake_case ( lowercase ): """simple docstring""" @slow @require_torch def snake_case ( self ): """simple docstring""" lowerCamelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" ) lowerCamelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" ) lowerCamelCase_ = bertabert.config.encoder.vocab_size lowerCamelCase_ = tokenizer.sep_token_id lowerCamelCase_ = tokenizer.cls_token_id lowerCamelCase_ = 128 lowerCamelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" ) lowerCamelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" ) lowerCamelCase_ = train_dataset.select(range(32 ) ) lowerCamelCase_ = val_dataset.select(range(16 ) ) lowerCamelCase_ = 4 def _map_to_encoder_decoder_inputs(UpperCamelCase ): # Tokenizer will automatically set [BOS] <text> [EOS] lowerCamelCase_ = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCamelCase , max_length=512 ) lowerCamelCase_ = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCamelCase , max_length=128 ) lowerCamelCase_ = inputs.input_ids lowerCamelCase_ = inputs.attention_mask lowerCamelCase_ = outputs.input_ids lowerCamelCase_ = outputs.input_ids.copy() lowerCamelCase_ = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] lowerCamelCase_ = outputs.attention_mask assert all(len(UpperCamelCase ) == 512 for x in inputs.input_ids ) assert all(len(UpperCamelCase ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(UpperCamelCase ): lowerCamelCase_ = pred.label_ids lowerCamelCase_ = pred.predictions # all unnecessary tokens are removed lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) lowerCamelCase_ = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) lowerCamelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCamelCase ) )] ) / len(UpperCamelCase ) return {"accuracy": accuracy} # map train dataset lowerCamelCase_ = train_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase , batch_size=UpperCamelCase , remove_columns=["article", "highlights"] , ) train_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) # same for validation dataset lowerCamelCase_ = val_dataset.map( _map_to_encoder_decoder_inputs , batched=UpperCamelCase , batch_size=UpperCamelCase , remove_columns=["article", "highlights"] , ) val_dataset.set_format( type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , ) lowerCamelCase_ = self.get_auto_remove_tmp_dir() lowerCamelCase_ = SeqaSeqTrainingArguments( output_dir=UpperCamelCase , per_device_train_batch_size=UpperCamelCase , per_device_eval_batch_size=UpperCamelCase , predict_with_generate=UpperCamelCase , evaluation_strategy="steps" , do_train=UpperCamelCase , do_eval=UpperCamelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowerCamelCase_ = SeqaSeqTrainer( model=UpperCamelCase , args=UpperCamelCase , compute_metrics=_compute_metrics , 
train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , tokenizer=UpperCamelCase , ) # start training trainer.train()
55
'''simple docstring''' from math import sqrt def snake_case__ ( _A: int = 1000000 ) -> int: '''simple docstring''' lowerCAmelCase = 0 lowerCAmelCase = 0 lowerCAmelCase = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_A , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'{solution() = }')
272
0
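The last function in the record above is the classic cuboid-route count (Project Euler 86): the shortest wall-crossing path on an a x b x c cuboid with a <= b <= c has length sqrt((a + b)^2 + c^2), and each integer hit is counted once per valid (a, b) split. A non-incremental restatement for a fixed maximum side M, with my own names:

from math import sqrt

def count_integer_route_cuboids(m: int) -> int:
    total = 0
    for c in range(1, m + 1):               # longest side
        for ab in range(2, 2 * c + 1):      # ab = a + b, with 1 <= a <= b <= c
            if sqrt(ab * ab + c * c).is_integer():
                # number of (a, b) pairs summing to ab under the ordering constraint
                total += min(ab // 2, c) - max(1, ab - c) + 1
    return total

print(count_integer_route_cuboids(99))      # 1975, matching the Euler 86 statement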
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a : Any = logging.get_logger(__name__) a : Tuple = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class a ( _lowerCamelCase , _lowerCamelCase ): snake_case_ = "bit" snake_case_ = ["preactivation", "bottleneck"] snake_case_ = ["SAME", "VALID"] def __init__( self : Tuple , lowercase_ : Union[str, Any]=3 , lowercase_ : Tuple=64 , lowercase_ : Optional[int]=[256, 512, 1024, 2048] , lowercase_ : Dict=[3, 4, 6, 3] , lowercase_ : Any="preactivation" , lowercase_ : str="relu" , lowercase_ : List[Any]=None , lowercase_ : List[Any]=32 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[Any]=False , lowercase_ : Union[str, Any]=32 , lowercase_ : str=1 , lowercase_ : List[Any]=None , lowercase_ : List[str]=None , **lowercase_ : Tuple , ): super().__init__(**lowercase_ ) if layer_type not in self.layer_types: raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" ) if global_padding is not None: if global_padding.upper() in self.supported_padding: snake_case_ = global_padding.upper() else: raise ValueError(F"Padding strategy {global_padding} not supported" ) snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = global_padding snake_case_ = num_groups snake_case_ = drop_path_rate snake_case_ = embedding_dynamic_padding snake_case_ = output_stride snake_case_ = width_factor snake_case_ = ['''stem'''] + [F"stage{idx}" for idx in range(1 , len(lowercase_ ) + 1 )] snake_case_ ,snake_case_ = get_aligned_output_features_output_indices( out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
56
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowercase = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
272
0
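The Bit config in this row leans on transformers' get_aligned_output_features_output_indices to reconcile out_features and out_indices against stage_names. A rough sketch of that alignment, reconstructed from the call site rather than the library source, so treat both the helper name and the exact fallback behavior as assumptions:

def align_output_features(out_features, out_indices, stage_names):
    # with no selector given, fall back to the deepest stage
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        # derive the names from the requested indices
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        # derive the indices from the requested names
        return list(out_features), [stage_names.index(name) for name in out_features]
    return list(out_features), list(out_indices)


names = ["stem", "stage1", "stage2", "stage3", "stage4"]
print(align_output_features(None, None, names))        # (['stage4'], [4])
print(align_output_features(["stage2"], None, names))  # (['stage2'], [2])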
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = filter(lambda _UpperCamelCase : p.requires_grad , model.parameters() ) __lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params A : List[str] = logging.getLogger(__name__) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' if metric == "rouge2": __lowerCAmelCase = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": __lowerCAmelCase = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": __lowerCAmelCase = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) __lowerCAmelCase = ModelCheckpoint( dirpath=_UpperCamelCase , filename=_UpperCamelCase , monitor=f"val_{metric}" , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' return EarlyStopping( monitor=f"val_{metric}" , mode="min" if "loss" in metric else "max" , patience=_UpperCamelCase , verbose=_UpperCamelCase , ) class _UpperCamelCase ( pl.Callback ): '''simple docstring''' def snake_case ( self , __a , __a ): __lowerCAmelCase = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(__a ) @rank_zero_only def snake_case ( self , __a , __a , __a , __a=True ): logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****" ) __lowerCAmelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results __lowerCAmelCase = Path(pl_module.hparams.output_dir ) if type_path == "test": __lowerCAmelCase = od / "test_results.txt" __lowerCAmelCase = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
__lowerCAmelCase = od / f"{type_path}_results/{trainer.global_step:05d}.txt" __lowerCAmelCase = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=__a ) generations_file.parent.mkdir(exist_ok=__a ) with open(__a , "a+" ) as writer: for key in sorted(__a ): if key in ["log", "progress_bar", "preds"]: continue __lowerCAmelCase = metrics[key] if isinstance(__a , torch.Tensor ): __lowerCAmelCase = val.item() __lowerCAmelCase = f"{key}: {val:.6f}\n" writer.write(__a ) if not save_generations: return if "preds" in metrics: __lowerCAmelCase = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(__a ) @rank_zero_only def snake_case ( self , __a , __a ): try: __lowerCAmelCase = pl_module.model.model.num_parameters() except AttributeError: __lowerCAmelCase = pl_module.model.num_parameters() __lowerCAmelCase = count_trainable_parameters(__a ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} ) @rank_zero_only def snake_case ( self , __a , __a ): save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(__a , __a , "test" ) @rank_zero_only def snake_case ( self , __a , __a ): save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
57
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std def a_ ( self): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None def a_ ( self): """simple docstring""" lowerCAmelCase = DPTImageProcessingTester(self) @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""")) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""")) self.assertTrue(hasattr(__lowerCAmelCase , """size""")) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18}) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42}) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" 
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
272
0
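The Lightning callback in this row derives its n_params and grad_mp metrics by filtering model.parameters() on requires_grad and summing element counts. The same computation isolated so it runs on any torch module; the nn.Linear below is just a stand-in model:

import torch.nn as nn


def count_trainable_parameters(model: nn.Module) -> int:
    # only parameters that receive gradients contribute to the count
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


model = nn.Linear(16, 4)
print(count_trainable_parameters(model))  # 16 * 4 weights + 4 biases = 68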
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None class a_ ( snake_case_ ): '''simple docstring''' def __init__( self , A=1 , A=0 , A=2 , A=512 , A="cls" , A=False , A=True , **A , ) -> int: super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) _SCREAMING_SNAKE_CASE = project_dim _SCREAMING_SNAKE_CASE = pooler_fn _SCREAMING_SNAKE_CASE = learn_encoder _SCREAMING_SNAKE_CASE = use_attention_mask class a_ ( snake_case_ ): '''simple docstring''' UpperCamelCase = [R'''pooler''', R'''logit_scale'''] UpperCamelCase = [R'''position_ids''', R'''predictions.decoder.bias'''] UpperCamelCase = '''roberta''' UpperCamelCase = RobertaSeriesConfig def __init__( self , A ) -> Optional[int]: super().__init__(A ) _SCREAMING_SNAKE_CASE = XLMRobertaModel(A ) _SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) _SCREAMING_SNAKE_CASE = getattr(A , """has_pre_transformation""" , A ) if self.has_pre_transformation: _SCREAMING_SNAKE_CASE = nn.Linear(config.hidden_size , config.project_dim ) _SCREAMING_SNAKE_CASE = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def snake_case_( self , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , ) -> Any: _SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict _SCREAMING_SNAKE_CASE = self.base_model( input_ids=A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , encoder_hidden_states=A , encoder_attention_mask=A , output_attentions=A , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=A , ) if self.has_pre_transformation: _SCREAMING_SNAKE_CASE = outputs["""hidden_states"""][-2] _SCREAMING_SNAKE_CASE = self.pre_LN(A ) _SCREAMING_SNAKE_CASE = self.transformation_pre(A ) return TransformationModelOutput( projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: _SCREAMING_SNAKE_CASE = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=A , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
58
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str: '''simple docstring''' lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T return jnp.matmul(_A , norm_emb_a.T ) class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : CLIPConfig UpperCAmelCase_ : jnp.dtype = jnp.floataa def a_ ( self): """simple docstring""" lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config) lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype) lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim)) lowerCAmelCase = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,)) lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1] lowerCAmelCase = self.visual_projection(__lowerCAmelCase) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowerCAmelCase = 0.0 lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase) # Use a lower threshold if an image has any special care concept lowerCAmelCase = is_special_care * 0.01 lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = CLIPConfig UpperCAmelCase_ : Any = '''clip_input''' UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" if input_shape is None: lowerCAmelCase = (1, 224, 224, 3) lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase) super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase) lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""] return random_params def __call__( self 
, __lowerCAmelCase , __lowerCAmelCase = None , ): """simple docstring""" lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
272
0
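Both safety-checker variants in this row reduce to one primitive: L2-normalize two embedding matrices row-wise and take their matrix product, so every entry is a cosine similarity. A NumPy sketch of that primitive; the epsilon clip mirrors the a_min guard in the Flax version, and the function name is mine:

import numpy as np


def cosine_similarity_matrix(emb_a: np.ndarray, emb_b: np.ndarray, eps: float = 1e-12) -> np.ndarray:
    # divide each row by its L2 norm, guarding against zero-norm rows
    a = emb_a / np.clip(np.linalg.norm(emb_a, axis=1, keepdims=True), eps, None)
    b = emb_b / np.clip(np.linalg.norm(emb_b, axis=1, keepdims=True), eps, None)
    return a @ b.T  # (n, d) @ (d, m) -> (n, m) of cosine similarities


x = np.array([[1.0, 0.0], [1.0, 1.0]])
print(np.round(cosine_similarity_matrix(x, x), 3))  # ones on the diagonal, 0.707 off it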
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F'{solution() = }')
59
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = MvpTokenizer UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = filter_roberta_detectors def a_ ( self): """simple docstring""" super().setUp() lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase = {"""unk_token""": """<unk>"""} lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(__lowerCAmelCase)) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase): """simple docstring""" return "lower newer", "lower newer" @cached_property def a_ ( self): """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""") @cached_property def a_ ( self): """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""") @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) # Test that special tokens are reset @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""") # check if input_ids are returned and no labels self.assertIn("""input_ids""" , __lowerCAmelCase) self.assertIn("""attention_mask""" , __lowerCAmelCase) 
self.assertNotIn("""labels""" , __lowerCAmelCase) self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""") self.assertEqual(32 , targets["""input_ids"""].shape[1]) @require_torch def a_ ( self): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual(batch.input_ids.shape , (2, 1024)) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization."""] lowerCAmelCase = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""") lowerCAmelCase = inputs["""input_ids"""] lowerCAmelCase = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = """A, <mask> AllenNLP sentence.""" lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
272
0
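The pentagonal predicate in this row inverts P(n) = n(3n - 1) / 2: solving the quadratic gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that expression is a whole number. A quick self-check against the first few pentagonal numbers, reusing the predicate from the snippet:

def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


pentagonals = [(i * (3 * i - 1)) // 2 for i in range(1, 7)]  # [1, 5, 12, 22, 35, 51]
assert all(is_pentagonal(p) for p in pentagonals)
assert not any(is_pentagonal(x) for x in (2, 3, 4, 6, 7))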
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : str = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_snake_case ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2: # Calculate the determinant of the matrix lowerCAmelCase : int = float( d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creates a copy of the matrix with swapped positions of the elements lowerCAmelCase : Optional[int] = [[0.0, 0.0], [0.0, 0.0]] lowerCAmelCase, lowerCAmelCase : List[Any] = matrix[1][1], matrix[0][0] lowerCAmelCase, lowerCAmelCase : Union[str, Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_snake_case ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_snake_case ) == 3 and len(matrix[0] ) == 3 and len(matrix[1] ) == 3 and len(matrix[2] ) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowerCAmelCase : int = float( ( (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] )) + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] )) + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] )) ) - ( (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] )) + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] )) + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] )) ) ) if determinant == 0: raise ValueError('''This matrix has no inverse.''' ) # Creating cofactor matrix lowerCAmelCase : Dict = [ [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], [d(0.0 ), d(0.0 ), d(0.0 )], ] lowerCAmelCase : List[str] = (d(matrix[1][1] ) * d(matrix[2][2] )) - ( d(matrix[1][2] ) * d(matrix[2][1] ) ) lowerCAmelCase : Dict = -( (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] )) ) lowerCAmelCase : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - ( d(matrix[1][1] ) * d(matrix[2][0] ) ) lowerCAmelCase : Any = -( (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] )) ) lowerCAmelCase : Any = (d(matrix[0][0] ) * d(matrix[2][2] )) - ( d(matrix[0][2] ) * d(matrix[2][0] ) ) lowerCAmelCase : Optional[int] = -( (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] )) ) lowerCAmelCase : Optional[int] = (d(matrix[0][1] ) * d(matrix[1][2] )) - ( d(matrix[0][2] ) * d(matrix[1][1] ) ) lowerCAmelCase : Dict = -( (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] )) ) lowerCAmelCase : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - ( d(matrix[0][1] ) * d(matrix[1][0] ) ) # Transpose the cofactor matrix (Adjoint matrix) lowerCAmelCase : str = array(_snake_case ) for i in range(3 ): for j in range(3 ): lowerCAmelCase : Optional[Any] = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowerCAmelCase : Tuple = array(_snake_case ) for i in range(3 ): for j in range(3 ): inverse_matrix[i][j] /= d(_snake_case ) # Calculate the inverse of the matrix return [[float(d(_snake_case ) ) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
60
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a__( enum.Enum ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 2 @add_end_docstrings(lowerCAmelCase__ ) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCAmelCase = None if self.model.config.prefix is not None: lowerCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params) lowerCAmelCase = {**self._preprocess_params, **preprocess_params} lowerCAmelCase = {**self._forward_params, **forward_params} def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = {} if prefix is not None: lowerCAmelCase = prefix if prefix: lowerCAmelCase = self.tokenizer( __lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" """ [None, 'hole']""") lowerCAmelCase = handle_long_generation preprocess_params.update(__lowerCAmelCase) lowerCAmelCase = generate_kwargs lowerCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""") if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.TENSORS if return_type is not None: lowerCAmelCase = return_type if clean_up_tokenization_spaces is not None: lowerCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) if len(__lowerCAmelCase) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""") lowerCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True}) return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase) def __call__( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.tokenizer( prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prompt_text if handle_long_generation == "hole": lowerCAmelCase = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCAmelCase = generate_kwargs["""max_new_tokens"""] else: lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""") if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""") lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:] return inputs def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = model_inputs["""input_ids"""] lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 1 else: lowerCAmelCase = input_ids.shape[0] lowerCAmelCase = model_inputs.pop("""prompt_text""") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0) if prefix_length > 0: lowerCAmelCase = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCAmelCase = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = generated_sequence.shape[0] if self.framework == "pt": lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True): """simple docstring""" lowerCAmelCase = model_outputs["""generated_sequence"""][0] lowerCAmelCase = model_outputs["""input_ids"""] lowerCAmelCase = model_outputs["""prompt_text"""] lowerCAmelCase = generated_sequence.numpy().tolist() lowerCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCAmelCase = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCAmelCase = self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCAmelCase = 0 else: lowerCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: lowerCAmelCase = prompt_text + text[prompt_length:] else: lowerCAmelCase = text[prompt_length:] lowerCAmelCase = {"""generated_text""": all_text} records.append(__lowerCAmelCase) return records
272
0
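For the 2x2 branch of the inverse routine in this row, the closed form is adj(A) / det(A), with the adjugate obtained by swapping the diagonal entries and negating the off-diagonal ones. A worked instance makes the swap-and-negate step concrete:

matrix = [[4.0, 7.0], [2.0, 6.0]]
det = matrix[0][0] * matrix[1][1] - matrix[0][1] * matrix[1][0]  # 4*6 - 7*2 = 10
inverse = [
    [matrix[1][1] / det, -matrix[0][1] / det],
    [-matrix[1][0] / det, matrix[0][0] / det],
]
print(inverse)  # [[0.6, -0.7], [-0.2, 0.4]]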
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=lowercase__ ) class A_ (lowercase__ ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization SCREAMING_SNAKE_CASE__ : str = field(default="""question-answering-extractive""" ,metadata={"""include_in_asdict_even_if_is_default""": True} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} ) SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features( { """answers""": Sequence( { """text""": Value("""string""" ), """answer_start""": Value("""int32""" ), } ) } ) SCREAMING_SNAKE_CASE__ : str = "question" SCREAMING_SNAKE_CASE__ : str = "context" SCREAMING_SNAKE_CASE__ : str = "answers" @property def UpperCamelCase__ ( self ): """simple docstring""" return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
61
'''simple docstring'''


def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
272
0
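A usage check for the z-function row: z_result[i] is the length of the longest common prefix of the string and its suffix starting at index i, and find_pattern counts occurrences by scanning the z values of pattern + text for entries of at least len(pattern). Using the names from the snippet:

assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
# "aba" starts at offsets 0 and 4 of "abacaba"
assert find_pattern("aba", "abacaba") == 2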
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase__ ( unittest.TestCase ): """simple docstring""" def _a ( self ) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() def _a ( self ) -> List[Any]: __UpperCamelCase , __UpperCamelCase =FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=A_ , dtype=jnp.bfloataa ) __UpperCamelCase , __UpperCamelCase =FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=A_ , from_pt=A_ , dtype=jnp.bfloataa ) __UpperCamelCase =controlnet_params __UpperCamelCase ='bird' __UpperCamelCase =jax.device_count() __UpperCamelCase =pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCamelCase =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) __UpperCamelCase =pipe.prepare_image_inputs([canny_image] * num_samples ) __UpperCamelCase =jax.random.PRNGKey(0 ) __UpperCamelCase =jax.random.split(A_ , jax.device_count() ) __UpperCamelCase =replicate(A_ ) __UpperCamelCase =shard(A_ ) __UpperCamelCase =shard(A_ ) __UpperCamelCase =pipe( prompt_ids=A_ , image=A_ , params=A_ , prng_seed=A_ , num_inference_steps=50 , jit=A_ , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __UpperCamelCase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCamelCase =images[0, 253:256, 253:256, -1] __UpperCamelCase =jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCamelCase =jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def _a ( self ) -> Optional[Any]: __UpperCamelCase , __UpperCamelCase =FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=A_ , dtype=jnp.bfloataa ) __UpperCamelCase , __UpperCamelCase =FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=A_ , from_pt=A_ , dtype=jnp.bfloataa ) __UpperCamelCase =controlnet_params __UpperCamelCase ='Chef in the kitchen' __UpperCamelCase =jax.device_count() __UpperCamelCase =pipe.prepare_text_inputs([prompts] * num_samples ) __UpperCamelCase =load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) __UpperCamelCase =pipe.prepare_image_inputs([pose_image] * num_samples ) __UpperCamelCase =jax.random.PRNGKey(0 ) __UpperCamelCase =jax.random.split(A_ , jax.device_count() ) __UpperCamelCase =replicate(A_ ) __UpperCamelCase =shard(A_ ) __UpperCamelCase =shard(A_ ) __UpperCamelCase =pipe( prompt_ids=A_ , image=A_ , params=A_ , prng_seed=A_ , num_inference_steps=50 , jit=A_ , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) __UpperCamelCase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __UpperCamelCase =images[0, 253:256, 253:256, -1] __UpperCamelCase =jnp.asarray(jax.device_get(image_slice.flatten() ) ) __UpperCamelCase =jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 
0.29_4922, 0.30_2734, 0.30_2734]] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
62
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : str = '''EncodecFeatureExtractor''' UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True): """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase) def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""") if text is not None: lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase) if audio is not None: lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase) if audio is None: return inputs elif text is None: return audio_inputs else: lowerCAmelCase = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: lowerCAmelCase = audio_inputs["""padding_mask"""] return inputs def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase) else: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = to_numpy(__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase) lowerCAmelCase = to_numpy(__lowerCAmelCase) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) lowerCAmelCase = seq_len - padding_mask.shape[-1] lowerCAmelCase = 1 - self.feature_extractor.padding_value lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase) lowerCAmelCase = audio_values.tolist() for i in range(__lowerCAmelCase): lowerCAmelCase = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1) return audio_values
272
0
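The Encodec processor's _decode_audio above strips padding by first extending the mask with the non-padding token and then boolean-indexing each waveform. The same trick isolated in NumPy on toy data; a padding_value of 0.0 is assumed here, matching the feature-extractor default:

import numpy as np

padding_value = 0.0
audio = np.array([[0.1, 0.2, 0.3, 0.9, 0.5]])  # decoded batch with one extra generated sample
padding_mask = np.array([[1, 1, 1, 0]])        # mask for the original, shorter input
# extend the mask with the *non*-padding token so newly generated
# samples at the end are kept rather than treated as padding
difference = audio.shape[-1] - padding_mask.shape[-1]
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value)
print(audio[0][padding_mask[0] != padding_value])  # [0.1 0.2 0.3 0.5]: padded slot dropped, new sample kept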
'''simple docstring'''
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # render the bar only on the local main process; every other rank gets a disabled bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
63
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a__( unittest.TestCase ): '''simple docstring''' @property def a_ ( self): """simple docstring""" torch.manual_seed(0) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def a_ ( self): """simple docstring""" lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = """google/ddpm-cifar10-32""" lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase) lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
272
0
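The tqdm wrapper in this row gates the progress bar on the local process index, so a training loop can be written once and rendered only where it is useful. A sketch of the call pattern under an accelerate-style launch; main_process_only is the wrapper's first positional argument, and the import path is assumed from the snippet's package layout:

from accelerate.utils import tqdm

for batch in tqdm(True, range(100)):   # bar rendered only on local rank 0
    pass
for batch in tqdm(False, range(100)):  # bar rendered on every process
    pass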
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class lowercase( __a , __a , unittest.TestCase ): '''simple docstring''' lowercase__ = AutoencoderKL lowercase__ = "sample" lowercase__ = 1e-2 @property def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : List[Any] = 4 _snake_case : Tuple = 3 _snake_case : Dict = (32, 32) _snake_case : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(a_ ) return {"sample": image} @property def UpperCamelCase_ ( self: str ): '''simple docstring''' return (3, 32, 32) @property def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' return (3, 32, 32) def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Tuple = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } _snake_case : Dict = self.dummy_input return init_dict, inputs_dict def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' pass def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' pass @unittest.skipIf(torch_device == """mps""", """Gradient checkpointing skipped on MPS""" ) def UpperCamelCase_ ( self: List[Any] ): '''simple docstring''' _snake_case , _snake_case : Optional[Any] = self.prepare_init_args_and_inputs_for_common() _snake_case : str = self.model_class(**a_ ) model.to(a_ ) assert not model.is_gradient_checkpointing and model.training _snake_case : List[str] = model(**a_ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() _snake_case : str = torch.randn_like(a_ ) _snake_case : int = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing _snake_case : int = self.model_class(**a_ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(a_ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training _snake_case : Tuple = model_a(**a_ ).sample # run the backwards pass on the model. 
# For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() _snake_case : int = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) _snake_case : List[Any] = dict(model.named_parameters() ) _snake_case : List[Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5E-5 ) ) def UpperCamelCase_ ( self: List[str] ): '''simple docstring''' _snake_case , _snake_case : Optional[int] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""", output_loading_info=a_ ) self.assertIsNotNone(a_ ) self.assertEqual(len(loading_info["""missing_keys"""] ), 0 ) model.to(a_ ) _snake_case : List[Any] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : str = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) _snake_case : Tuple = model.to(a_ ) model.eval() if torch_device == "mps": _snake_case : int = torch.manual_seed(0 ) else: _snake_case : Tuple = torch.Generator(device=a_ ).manual_seed(0 ) _snake_case : List[str] = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), ) _snake_case : Union[str, Any] = image.to(a_ ) with torch.no_grad(): _snake_case : List[str] = model(a_, sample_posterior=a_, generator=a_ ).sample _snake_case : Tuple = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU.
if torch_device == "mps": _snake_case : Any = torch.tensor( [ -4.00_78E-01, -3.83_23E-04, -1.26_81E-01, -1.14_62E-01, 2.00_95E-01, 1.08_93E-01, -8.82_47E-02, -3.03_61E-01, -9.86_44E-03, ] ) elif torch_device == "cpu": _snake_case : Dict = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: _snake_case : List[Any] = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(a_, a_, rtol=1E-2 ) ) @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: List[str], a_: List[Any], a_: List[Any] ): '''simple docstring''' return f"gaussian_noise_s={seed}_shape={'_'.join([str(a_ ) for s in shape] )}.npy" def UpperCamelCase_ ( self: str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self: str, a_: List[str]=0, a_: Tuple=(4, 3, 512, 512), a_: Optional[Any]=False ): '''simple docstring''' _snake_case : str = torch.floataa if fpaa else torch.floataa _snake_case : int = torch.from_numpy(load_hf_numpy(self.get_file_format(a_, a_ ) ) ).to(a_ ).to(a_ ) return image def UpperCamelCase_ ( self: Any, a_: Optional[int]="CompVis/stable-diffusion-v1-4", a_: str=False ): '''simple docstring''' _snake_case : str = """fp16""" if fpaa else None _snake_case : Optional[int] = torch.floataa if fpaa else torch.floataa _snake_case : Union[str, Any] = AutoencoderKL.from_pretrained( a_, subfolder="""vae""", torch_dtype=a_, revision=a_, ) model.to(a_ ).eval() return model def UpperCamelCase_ ( self: Union[str, Any], a_: List[str]=0 ): '''simple docstring''' if torch_device == "mps": return torch.manual_seed(a_ ) return torch.Generator(device=a_ ).manual_seed(a_ ) @parameterized.expand( [ # fmt: off [33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCamelCase_ ( self: Dict, a_: Any, a_: Any, a_: int ): '''simple docstring''' _snake_case : str = self.get_sd_vae_model() _snake_case : str = self.get_sd_image(a_ ) _snake_case : Dict = self.get_generator(a_ ) with torch.no_grad(): _snake_case : str = model(a_, generator=a_, sample_posterior=a_ ).sample assert sample.shape == image.shape _snake_case : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() _snake_case : Optional[int] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(a_, a_, atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase_ ( self: Tuple, a_: int, a_: Dict ): '''simple docstring''' _snake_case : Tuple = self.get_sd_vae_model(fpaa=a_ ) _snake_case : Tuple = self.get_sd_image(a_, fpaa=a_ ) _snake_case : Tuple = self.get_generator(a_ ) with torch.no_grad(): _snake_case : Optional[int] = model(a_, generator=a_, sample_posterior=a_ ).sample assert sample.shape == image.shape _snake_case : Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu() _snake_case : Tuple = torch.tensor(a_ ) assert torch_all_close(a_, a_, atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, 
[-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCamelCase_ ( self: List[str], a_: Tuple, a_: List[Any], a_: int ): '''simple docstring''' _snake_case : List[Any] = self.get_sd_vae_model() _snake_case : Union[str, Any] = self.get_sd_image(a_ ) with torch.no_grad(): _snake_case : Optional[int] = model(a_ ).sample assert sample.shape == image.shape _snake_case : int = sample[-1, -2:, -2:, :2].flatten().float().cpu() _snake_case : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(a_, a_, atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[int], a_: Any ): '''simple docstring''' _snake_case : Union[str, Any] = self.get_sd_vae_model() _snake_case : List[str] = self.get_sd_image(a_, shape=(3, 4, 64, 64) ) with torch.no_grad(): _snake_case : Union[str, Any] = model.decode(a_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] _snake_case : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu() _snake_case : List[Any] = torch.tensor(a_ ) assert torch_all_close(a_, a_, atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[Any], a_: int ): '''simple docstring''' _snake_case : Tuple = self.get_sd_vae_model(fpaa=a_ ) _snake_case : Union[str, Any] = self.get_sd_image(a_, shape=(3, 4, 64, 64), fpaa=a_ ) with torch.no_grad(): _snake_case : Any = model.decode(a_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] _snake_case : int = sample[-1, -2:, :2, -2:].flatten().float().cpu() _snake_case : Tuple = torch.tensor(a_ ) assert torch_all_close(a_, a_, atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available(), reason="""xformers is not required when using PyTorch 2.0.""" ) def UpperCamelCase_ ( self: Any, a_: Optional[Any] ): '''simple docstring''' _snake_case : Dict = self.get_sd_vae_model(fpaa=a_ ) _snake_case : List[Any] = self.get_sd_image(a_, shape=(3, 4, 64, 64), fpaa=a_ ) with torch.no_grad(): _snake_case : Optional[int] = model.decode(a_ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): _snake_case : List[str] = model.decode(a_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(a_, a_, atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available(), reason="""xformers is not required when using PyTorch 2.0.""" ) def UpperCamelCase_ ( self: str, a_: Union[str, Any] ): '''simple docstring''' _snake_case : List[Any] = self.get_sd_vae_model() _snake_case : Any = self.get_sd_image(a_, shape=(3, 4, 64, 64) ) with torch.no_grad(): _snake_case : int = model.decode(a_ ).sample model.enable_xformers_memory_efficient_attention() with 
torch.no_grad(): _snake_case : Union[str, Any] = model.decode(a_ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(a_, a_, atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCamelCase_ ( self: Dict, a_: Tuple, a_: Tuple ): '''simple docstring''' _snake_case : str = self.get_sd_vae_model() _snake_case : int = self.get_sd_image(a_ ) _snake_case : Dict = self.get_generator(a_ ) with torch.no_grad(): _snake_case : Dict = model.encode(a_ ).latent_dist _snake_case : Dict = dist.sample(generator=a_ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] _snake_case : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu() _snake_case : Tuple = torch.tensor(a_ ) _snake_case : List[Any] = 3E-3 if torch_device != """mps""" else 1E-2 assert torch_all_close(a_, a_, atol=a_ )
64
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
272
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCamelCase__ = logging.get_logger(__name__) def lowerCAmelCase_ ( __A ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(__A, (list, tuple) ) and isinstance(videos[0], (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__A, (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__A ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class A ( UpperCAmelCase_ ): __UpperCAmelCase : List[Any] = ['pixel_values'] def __init__(self : Any , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 2_5_5 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : List[Any] , ) -> None: """simple docstring""" super().__init__(**__UpperCAmelCase ) UpperCAmelCase__ = size if size is not None else {"shortest_edge": 2_2_4} UpperCAmelCase__ = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) UpperCAmelCase__ = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4} UpperCAmelCase__ = get_size_dict(__UpperCAmelCase , param_name="crop_size" ) UpperCAmelCase__ = do_resize UpperCAmelCase__ = size UpperCAmelCase__ = do_center_crop UpperCAmelCase__ = crop_size UpperCAmelCase__ = resample UpperCAmelCase__ = do_rescale UpperCAmelCase__ = rescale_factor UpperCAmelCase__ = do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase_ (self : Tuple , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : int , ) -> np.ndarray: """simple docstring""" UpperCAmelCase__ = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" in size: UpperCAmelCase__ = get_resize_output_image_size(__UpperCAmelCase , size["shortest_edge"] , default_to_square=__UpperCAmelCase ) elif "height" in size and "width" in size: UpperCAmelCase__ = (size["height"], size["width"]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Any , ) -> np.ndarray: """simple docstring""" UpperCAmelCase__ = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : Any , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[int, float] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[Any] , ) -> str: """simple docstring""" return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Dict , ) -> np.ndarray: """simple docstring""" return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : Optional[Any] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : float = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
UpperCAmelCase__ = to_numpy_array(__UpperCAmelCase ) if do_resize: UpperCAmelCase__ = self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) if do_center_crop: UpperCAmelCase__ = self.center_crop(__UpperCAmelCase , size=__UpperCAmelCase ) if do_rescale: UpperCAmelCase__ = self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) if do_normalize: UpperCAmelCase__ = self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) UpperCAmelCase__ = to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) return image def lowercase_ (self : List[Any] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : float = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : Optional[Any] , ) -> PIL.Image.Image: """simple docstring""" UpperCAmelCase__ = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ = resample if resample is not None else self.resample UpperCAmelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ = image_std if image_std is not None else self.image_std UpperCAmelCase__ = size if size is not None else self.size UpperCAmelCase__ = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) UpperCAmelCase__ = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ = get_size_dict(__UpperCAmelCase , param_name="crop_size" ) if not valid_images(__UpperCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) UpperCAmelCase__ = make_batched(__UpperCAmelCase ) UpperCAmelCase__ = [ [ self._preprocess_image( image=__UpperCAmelCase , do_resize=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , do_center_crop=__UpperCAmelCase , crop_size=__UpperCAmelCase , do_rescale=__UpperCAmelCase , rescale_factor=__UpperCAmelCase , do_normalize=__UpperCAmelCase , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase , data_format=__UpperCAmelCase , ) for img in video ] for video in videos ] UpperCAmelCase__ = {"pixel_values": videos} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
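# Usage sketch for the video processor above. Judging by its defaults (shortest
# edge 224, 224x224 crop, 1/255 rescale, ImageNet-standard mean/std, batched
# videos), it mirrors transformers' VideoMAE-style video processor; the upstream
# class name below is that assumption made explicit.
if __name__ == "__main__":
    import numpy as np
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor()
    # 8 random uint8 frames; the processor resizes/crops to 224x224 and normalizes.
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, C, H, W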
65
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# These two keys are referenced below in get_character().
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
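# A minimal interactive sketch of the helpers above (assumption: it is run from
# a real terminal/TTY; key names come from the KEYMAP defined above).
if __name__ == "__main__":
    print("Press keys (Ctrl-C to stop)...")
    while True:
        key = get_character()
        if key == chr(KEYMAP["interrupt"]):
            break
        print(repr(key))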
272
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __a = logging.get_logger(__name__) __a = { "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : List[Any] = """gptj""" _A : Union[str, Any] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self: int , snake_case: int=50_400 , snake_case: Optional[Any]=2_048 , snake_case: Any=4_096 , snake_case: Dict=28 , snake_case: Union[str, Any]=16 , snake_case: Optional[int]=64 , snake_case: List[Any]=None , snake_case: List[str]="gelu_new" , snake_case: Dict=0.0 , snake_case: Union[str, Any]=0.0 , snake_case: List[Any]=0.0 , snake_case: List[Any]=1E-5 , snake_case: Any=0.0_2 , snake_case: Union[str, Any]=True , snake_case: int=50_256 , snake_case: int=50_256 , snake_case: List[Any]=False , **snake_case: List[str] , ) -> Optional[Any]: snake_case_ :Optional[Any] = vocab_size snake_case_ :List[Any] = n_positions snake_case_ :List[str] = n_embd snake_case_ :List[str] = n_layer snake_case_ :int = n_head snake_case_ :int = n_inner snake_case_ :List[str] = rotary_dim snake_case_ :Optional[Any] = activation_function snake_case_ :int = resid_pdrop snake_case_ :List[str] = embd_pdrop snake_case_ :str = attn_pdrop snake_case_ :Union[str, Any] = layer_norm_epsilon snake_case_ :Optional[Any] = initializer_range snake_case_ :Any = use_cache snake_case_ :Tuple = bos_token_id snake_case_ :Any = eos_token_id super().__init__( bos_token_id=snake_case , eos_token_id=snake_case , tie_word_embeddings=snake_case , **snake_case ) class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' def __init__( self: int , snake_case: PretrainedConfig , snake_case: str = "default" , snake_case: List[PatchingSpec] = None , snake_case: bool = False , ) -> Any: super().__init__(snake_case , task=snake_case , patching_specs=snake_case , use_past=snake_case ) if not getattr(self._config , """pad_token_id""" , snake_case ): # TODO: how to do that better? 
snake_case_ :Optional[Any] = 0 @property def lowerCAmelCase_ ( self: Optional[int] ) -> Mapping[str, Mapping[int, str]]: snake_case_ :Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(snake_case , direction="""inputs""" ) snake_case_ :Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: snake_case_ :Tuple = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCAmelCase_ ( self: Tuple ) -> int: return self._config.n_layer @property def lowerCAmelCase_ ( self: Optional[int] ) -> int: return self._config.n_head def lowerCAmelCase_ ( self: int , snake_case: PreTrainedTokenizer , snake_case: int = -1 , snake_case: int = -1 , snake_case: bool = False , snake_case: Optional[TensorType] = None , ) -> Mapping[str, Any]: snake_case_ :Tuple = super(snake_case , self ).generate_dummy_inputs( snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case ) # We need to order the input in the way they appears in the forward() snake_case_ :int = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch snake_case_, snake_case_ :List[str] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values snake_case_ :Dict = seqlen + 2 snake_case_ :List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) snake_case_ :Optional[int] = [ (torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(self.num_layers ) ] snake_case_ :Dict = common_inputs["""attention_mask"""] if self.use_past: snake_case_ :Optional[int] = ordered_inputs["""attention_mask"""].dtype snake_case_ :List[str] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 ) return ordered_inputs @property def lowerCAmelCase_ ( self: List[str] ) -> int: return 13
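# A quick sketch of the config above (illustrative values; the module uses
# relative imports, so this assumes it runs inside the transformers package).
# `attribute_map` aliases the generic names onto GPT-J's field names.
if __name__ == "__main__":
    config = GPTJConfig(n_embd=1024, n_layer=4, n_head=8)
    assert config.hidden_size == config.n_embd == 1024
    assert config.num_hidden_layers == config.n_layer == 4
    assert config.num_attention_heads == config.n_head == 8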
66
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __lowercase = logging.get_logger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = ['''input_features'''] def __init__( self , __lowerCAmelCase=80 , __lowerCAmelCase=16000 , __lowerCAmelCase=160 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , **__lowerCAmelCase , ): """simple docstring""" super().__init__( feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , ) lowerCAmelCase = n_fft lowerCAmelCase = hop_length lowerCAmelCase = chunk_length lowerCAmelCase = chunk_length * sampling_rate lowerCAmelCase = self.n_samples // hop_length lowerCAmelCase = sampling_rate lowerCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ) def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = spectrogram( __lowerCAmelCase , window_function(self.n_fft , """hann""") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , ) lowerCAmelCase = log_spec[:, :-1] lowerCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0) lowerCAmelCase = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def a_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 0.0): """simple docstring""" if attention_mask is not None: lowerCAmelCase = np.array(__lowerCAmelCase , np.intaa) lowerCAmelCase = [] for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1)): lowerCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7) if length < normed_slice.shape[0]: lowerCAmelCase = padding_value normed_input_values.append(__lowerCAmelCase) else: lowerCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1E-7) for x in input_values] return normed_input_values def __call__( self , __lowerCAmelCase , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "max_length" , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ): """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" f" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""") lowerCAmelCase = isinstance(__lowerCAmelCase , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(f"Only mono-channel audio is supported for input to {self}") lowerCAmelCase = is_batched_numpy or ( isinstance(__lowerCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech] elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray): lowerCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa) elif isinstance(__lowerCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): lowerCAmelCase = raw_speech.astype(np.floataa) # always return batch if not is_batched: lowerCAmelCase = [np.asarray([raw_speech]).T] lowerCAmelCase = BatchFeature({"""input_features""": raw_speech}) # convert into correct format for padding lowerCAmelCase = self.pad( __lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowerCAmelCase = self.zero_mean_unit_var_norm( padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , ) lowerCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0) # make sure list is in array format lowerCAmelCase = padded_inputs.get("""input_features""").transpose(2 , 0 , 1) lowerCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase) for waveform in input_features[0]] if isinstance(input_features[0] , __lowerCAmelCase): lowerCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa) for feature in input_features] else: lowerCAmelCase = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowerCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length] if return_tensors is not None: lowerCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase) return padded_inputs def a_ ( self): """simple docstring""" lowerCAmelCase = copy.deepcopy(self.__dict__) lowerCAmelCase = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
272
0
from __future__ import annotations


def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
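# A small usage sketch of the function above. "khoor zruog" is "hello world"
# Caesar-shifted by 3; note that very short inputs can still be ambiguous for
# the chi-squared heuristic.
if __name__ == "__main__":
    shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("khoor zruog")
    print(f"shift={shift} chi_squared={chi_squared:.3f} decoded={decoded!r}")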
67
'''simple docstring''' from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __lowercase = logging.get_logger(__name__) __lowercase = '''T5Config''' class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[str] = '''mt5''' UpperCAmelCase_ : Tuple = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : List[Any] = '''mt5''' UpperCAmelCase_ : int = MTaConfig class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Tuple = '''mt5''' UpperCAmelCase_ : Union[str, Any] = MTaConfig
272
0
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
68
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
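# Quick sketch of the config above (illustrative values; the relative import
# means this assumes it runs inside the transformers package). `attribute_map`
# makes `num_classes` resolve to `num_labels`.
if __name__ == "__main__":
    config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
    assert config.num_classes == config.num_labels
    print(config.hidden_size, config.num_hidden_layers)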
272
0
"""simple docstring""" import argparse __UpperCamelCase = '''docs/source/_static/js/custom.js''' def UpperCAmelCase ( UpperCAmelCase ) -> int: with open(UpperCAmelCase , encoding='utf-8' , newline='\n' ) as f: snake_case_ = f.readlines() snake_case_ = 0 # First let's put the right version while not lines[index].startswith('const stableVersion =' ): index += 1 snake_case_ = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('const versionMapping = {' ): index += 1 # We go until the end while not lines[index].startswith('}' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(UpperCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--version''', help='''Release version.''') __UpperCamelCase = parser.parse_args() update_custom_js(args.version)
69
'''simple docstring''' import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors __lowercase = logging.getLogger(__name__) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : Any = '''sequence-classification''' def __init__( self , __lowerCAmelCase): """simple docstring""" if type(__lowerCAmelCase) == dict: lowerCAmelCase = Namespace(**__lowerCAmelCase) lowerCAmelCase = glue_output_modes[hparams.task] lowerCAmelCase = glue_tasks_num_labels[hparams.task] super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return self.model(**__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase = outputs[0] lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""] lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def a_ ( self): """simple docstring""" lowerCAmelCase = self.hparams lowerCAmelCase = processors[args.task]() lowerCAmelCase = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase = self._feature_file(__lowerCAmelCase) if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir) lowerCAmelCase = ( processor.get_dev_examples(args.data_dir) if mode == """dev""" else processor.get_train_examples(args.data_dir) ) lowerCAmelCase = convert_examples_to_features( __lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("""Saving features into cached file %s""" , __lowerCAmelCase) torch.save(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False): """simple docstring""" lowerCAmelCase = """dev""" if mode == """test""" else mode lowerCAmelCase = self._feature_file(__lowerCAmelCase) logger.info("""Loading features from cached file %s""" , __lowerCAmelCase) lowerCAmelCase = torch.load(__lowerCAmelCase) lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long) lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float) return DataLoader( TensorDataset(__lowerCAmelCase , __lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , ) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None lowerCAmelCase = self(**__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = outputs[:2] lowerCAmelCase = logits.detach().cpu().numpy() lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item() lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0) if self.hparams.glue_output_mode == "classification": lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase = np.squeeze(__lowerCAmelCase) lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0) lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])] lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)} lowerCAmelCase = dict(results.items()) lowerCAmelCase = results return ret, preds_list, out_label_list def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def a_ ( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase) lowerCAmelCase = ret["""log"""] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def a_ ( __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase) parser.add_argument( """--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , ) parser.add_argument( """--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""") return parser def snake_case__ ( ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase = argparse.ArgumentParser() add_generic_args(_A , os.getcwd() ) lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() ) lowerCAmelCase = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase = os.path.join( """./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , ) os.makedirs(args.output_dir ) lowerCAmelCase = GLUETransformer(_A ) lowerCAmelCase = generic_train(_A , _A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) ) lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_A ) if __name__ == "__main__": main()
272
0
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
70
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
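# Illustrative check that the deprecation shim above emits its warning
# (a sketch; assumes it runs inside the transformers package).
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        DeformableDetrFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)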
272
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: A_ :Optional[int] = None A_ :Any = logging.get_logger(__name__) A_ :List[str] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} A_ :Any = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } A_ :int = { '''google/rembert''': 256, } A_ :Tuple = '''▁''' class __A ( a ): """simple docstring""" UpperCamelCase__ : str =VOCAB_FILES_NAMES UpperCamelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase__ : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase__ : List[str] =RemBertTokenizer def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__="[CLS]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<unk>" , lowerCamelCase__="[SEP]" , lowerCamelCase__="<pad>" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , **lowerCamelCase__ , ): """simple docstring""" __UpperCamelCase : List[str] =AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token super().__init__( lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , remove_space=lowerCamelCase__ , keep_accents=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , ) __UpperCamelCase : str =do_lower_case __UpperCamelCase : List[str] =remove_space __UpperCamelCase : Dict =keep_accents __UpperCamelCase : Tuple =vocab_file __UpperCamelCase : Optional[int] =False if not self.vocab_file else True def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" __UpperCamelCase : List[Any] =[self.sep_token_id] __UpperCamelCase : Optional[int] =[self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ): """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1] def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" __UpperCamelCase : List[Any] =[self.sep_token_id] __UpperCamelCase : Optional[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase__ ): logger.error('Vocabulary path ({}) should be a directory'.format(lowerCamelCase__ ) ) return __UpperCamelCase : List[str] =os.path.join( lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ): copyfile(self.vocab_file , lowerCamelCase__ ) return (out_vocab_file,)
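# Hedged usage sketch for the fast tokenizer above (assumption: network access
# to the `google/rembert` checkpoint referenced in the maps above; kept as
# comments because it downloads the sentencepiece model):
#
#   from transformers import RemBertTokenizerFast
#
#   tok = RemBertTokenizerFast.from_pretrained("google/rembert")
#   enc = tok("Hello world")  # [CLS] ... [SEP] layout built by the methods above
#   print(enc["input_ids"])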
71
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
0
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def snake_case_ ( A_ : Tuple, A_ : int, A_ : Dict ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = LxmertConfig.from_json_file(A_ ) print(F'''Building PyTorch model from configuration: {config}''' ) _lowerCamelCase : List[str] = LxmertForPreTraining(A_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(A_, A_, A_ ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict(), A_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
72
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class a__( unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = ViTImageProcessor if is_vision_available() else None @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = (3, 32, 128) lowerCAmelCase = tempfile.mkdtemp() # fmt: off lowerCAmelCase = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") lowerCAmelCase = { """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 128}, } lowerCAmelCase = os.path.join(self.tmpdirname , __lowerCAmelCase) with open(self.image_processor_file , """w""" , encoding="""utf-8""") as fp: json.dump(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self): """simple docstring""" shutil.rmtree(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) lowerCAmelCase = Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1)) return image_input def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__lowerCAmelCase) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_image_processor() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") 
lowerCAmelCase = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0) lowerCAmelCase = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__lowerCAmelCase , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __lowerCAmelCase) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = image_processor(__lowerCAmelCase , return_tensors="""np""") lowerCAmelCase = processor(images=__lowerCAmelCase , return_tensors="""np""") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = processor(text=__lowerCAmelCase) lowerCAmelCase = tokenizer(__lowerCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = """test""" lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""]) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase): processor() def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase = processor.char_decode(__lowerCAmelCase) lowerCAmelCase = tokenizer.batch_decode(__lowerCAmelCase) lowerCAmelCase = [seq.replace(""" """ , """""") for seq in decoded_tok] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = None lowerCAmelCase = self.prepare_image_inputs() lowerCAmelCase = processor(text=__lowerCAmelCase , images=__lowerCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def a_ ( self): """simple docstring""" lowerCAmelCase = self.get_image_processor() lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = MgpstrProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase) lowerCAmelCase = torch.randn(1 , 27 , 38) lowerCAmelCase = torch.randn(1 , 27 , 50257) lowerCAmelCase = torch.randn(1 , 27 , 30522) lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input]) 
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
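# A minimal usage sketch for the MGP-STR processor exercised by the test record above.
# The checkpoint id is an assumption (a public MGP-STR repo); the logits shapes and the
# output keys mirror the `batch_decode` call and assertion in the test itself.
import numpy as np
import torch
from PIL import Image
from transformers import MgpstrProcessor

processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")  # assumed checkpoint id
image = Image.fromarray(np.random.randint(0, 255, (32, 128, 3), dtype=np.uint8))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
# `batch_decode` fuses character-, BPE- and wordpiece-level logits into text predictions,
# exactly as the `[char_input, bpe_input, wp_input]` call in the test above.
char_logits = torch.randn(1, 27, 38)
bpe_logits = torch.randn(1, 27, 50257)
wp_logits = torch.randn(1, 27, 30522)
results = processor.batch_decode([char_logits, bpe_logits, wp_logits])
print(results["generated_text"])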
272
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: __lowerCamelCase : str = checkpoint __lowerCamelCase : Optional[Any] = {} __lowerCamelCase : Any = vae_state_dict['encoder.conv_in.weight'] __lowerCamelCase : List[str] = vae_state_dict['encoder.conv_in.bias'] __lowerCamelCase : Optional[Any] = vae_state_dict['encoder.conv_out.weight'] __lowerCamelCase : int = vae_state_dict['encoder.conv_out.bias'] __lowerCamelCase : Union[str, Any] = vae_state_dict['encoder.norm_out.weight'] __lowerCamelCase : Optional[int] = vae_state_dict['encoder.norm_out.bias'] __lowerCamelCase : Optional[Any] = vae_state_dict['decoder.conv_in.weight'] __lowerCamelCase : Optional[int] = vae_state_dict['decoder.conv_in.bias'] __lowerCamelCase : Union[str, Any] = vae_state_dict['decoder.conv_out.weight'] __lowerCamelCase : Any = vae_state_dict['decoder.conv_out.bias'] __lowerCamelCase : int = vae_state_dict['decoder.norm_out.weight'] __lowerCamelCase : Optional[int] = vae_state_dict['decoder.norm_out.bias'] __lowerCamelCase : Dict = vae_state_dict['quant_conv.weight'] __lowerCamelCase : Optional[Any] = vae_state_dict['quant_conv.bias'] __lowerCamelCase : Optional[int] = vae_state_dict['post_quant_conv.weight'] __lowerCamelCase : str = vae_state_dict['post_quant_conv.bias'] # Retrieves the keys for the encoder down blocks only __lowerCamelCase : Optional[int] = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} ) __lowerCamelCase : str = { layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(lowerCamelCase__ ) } # Retrieves the keys for the decoder up blocks only __lowerCamelCase : Any = len({'.'.join(layer.split('.' 
)[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} ) __lowerCamelCase : Tuple = { layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(lowerCamelCase__ ) } for i in range(lowerCamelCase__ ): __lowerCamelCase : Optional[int] = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key] if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: __lowerCamelCase : Optional[Any] = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.weight" ) __lowerCamelCase : Union[str, Any] = vae_state_dict.pop( F"encoder.down.{i}.downsample.conv.bias" ) __lowerCamelCase : List[str] = renew_vae_resnet_paths(lowerCamelCase__ ) __lowerCamelCase : List[str] = {'old': F"down.{i}.block", 'new': F"down_blocks.{i}.resnets"} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) __lowerCamelCase : int = [key for key in vae_state_dict if 'encoder.mid.block' in key] __lowerCamelCase : Optional[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __lowerCamelCase : str = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key] __lowerCamelCase : Optional[int] = renew_vae_resnet_paths(lowerCamelCase__ ) __lowerCamelCase : Tuple = {'old': F"mid.block_{i}", 'new': F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) __lowerCamelCase : Union[str, Any] = [key for key in vae_state_dict if 'encoder.mid.attn' in key] __lowerCamelCase : Optional[Any] = renew_vae_attention_paths(lowerCamelCase__ ) __lowerCamelCase : Any = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) conv_attn_to_linear(lowerCamelCase__ ) for i in range(lowerCamelCase__ ): __lowerCamelCase : str = num_up_blocks - 1 - i __lowerCamelCase : Optional[int] = [ key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key ] if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: __lowerCamelCase : Union[str, Any] = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.weight" ] __lowerCamelCase : int = vae_state_dict[ F"decoder.up.{block_id}.upsample.conv.bias" ] __lowerCamelCase : str = renew_vae_resnet_paths(lowerCamelCase__ ) __lowerCamelCase : Optional[int] = {'old': F"up.{block_id}.block", 'new': F"up_blocks.{i}.resnets"} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) __lowerCamelCase : List[str] = [key for key in vae_state_dict if 'decoder.mid.block' in key] __lowerCamelCase : List[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __lowerCamelCase : Optional[Any] = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key] __lowerCamelCase : Optional[Any] = renew_vae_resnet_paths(lowerCamelCase__ ) __lowerCamelCase : Optional[Any] = {'old': F"mid.block_{i}", 'new': F"mid_block.resnets.{i - 1}"} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) __lowerCamelCase : Union[str, Any] = [key for key in vae_state_dict if 'decoder.mid.attn' in key] __lowerCamelCase : Optional[int] = renew_vae_attention_paths(lowerCamelCase__ ) __lowerCamelCase : List[Any] = {'old': 'mid.attn_1', 
'new': 'mid_block.attentions.0'} assign_to_checkpoint(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , additional_replacements=[meta_path] , config=lowerCamelCase__ ) conv_attn_to_linear(lowerCamelCase__ ) return new_checkpoint def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , ) -> str: # Only support V1 __lowerCamelCase : Tuple = requests.get( ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' ) __lowerCamelCase : List[Any] = io.BytesIO(r.content ) __lowerCamelCase : int = OmegaConf.load(lowerCamelCase__ ) __lowerCamelCase : int = 5_1_2 __lowerCamelCase : Optional[Any] = 'cuda' if torch.cuda.is_available() else 'cpu' if checkpoint_path.endswith('safetensors' ): from safetensors import safe_open __lowerCamelCase : Dict = {} with safe_open(lowerCamelCase__ , framework='pt' , device='cpu' ) as f: for key in f.keys(): __lowerCamelCase : Dict = f.get_tensor(lowerCamelCase__ ) else: __lowerCamelCase : Optional[int] = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )['state_dict'] # Convert the VAE model. __lowerCamelCase : Dict = create_vae_diffusers_config(lowerCamelCase__ , image_size=lowerCamelCase__ ) __lowerCamelCase : Tuple = custom_convert_ldm_vae_checkpoint(lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase : Union[str, Any] = AutoencoderKL(**lowerCamelCase__ ) vae.load_state_dict(lowerCamelCase__ ) vae.save_pretrained(lowerCamelCase__ ) if __name__ == "__main__": a =argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") a =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
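# A usage sketch for the conversion script above; the script filename and paths are
# illustrative. The script downloads the v1 inference config, rebuilds the diffusers VAE
# config, remaps the checkpoint keys, and saves an `AutoencoderKL` to `--dump_path`:
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.pt \
#       --dump_path ./converted_vae
#
# The saved weights can then be loaded with the regular diffusers API:
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("./converted_vae")  # illustrative local path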
73
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) lowerCAmelCase = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f) self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=True lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it save with the same files self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) # Save tokenizer rust, legacy_format=False lowerCAmelCase = tempfile.mkdtemp() lowerCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way lowerCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase) lowerCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase)) shutil.rmtree(__lowerCAmelCase) @cached_property def a_ ( self): """simple docstring""" return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""") def a_ ( self): """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__lowerCAmelCase , f.name) lowerCAmelCase = XLMRobertaTokenizer(f.name , keep_accents=__lowerCAmelCase) lowerCAmelCase = pickle.dumps(__lowerCAmelCase) pickle.loads(__lowerCAmelCase) def a_ ( self): """simple docstring""" if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = """I was born in 92000, and this is falsé.""" lowerCAmelCase = tokenizer.tokenize(__lowerCAmelCase) lowerCAmelCase 
= rust_tokenizer.tokenize(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(__lowerCAmelCase) lowerCAmelCase = rust_tokenizer.encode(__lowerCAmelCase) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = """Hello World!""" lowerCAmelCase = [0, 35378, 6661, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCAmelCase = [ 0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780, 705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119, 57702, 136, 186, 47, 1098, 29367, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase)) @slow def a_ ( self): """simple docstring""" lowerCAmelCase = {"""input_ids""": [[0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
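# A short sketch of the save/reload round trip the tokenizer tests above check, using
# the same public checkpoint and sample sentence as the test; the temporary directory
# name is illustrative.
from transformers import XLMRobertaTokenizer

tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
ids = tokenizer.encode("I was born in 92000, and this is falsé.")
tokenizer.save_pretrained("./xlmr-tok")
reloaded = XLMRobertaTokenizer.from_pretrained("./xlmr-tok")
assert reloaded.encode("I was born in 92000, and this is falsé.") == ids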
272
0
"""simple docstring""" def _snake_case ( snake_case__ : int , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): # Return True if there is node that has not iterated. A = [False] * len(snake_case__ ) A = [] queue.append(snake_case__ ) A = True while queue: A = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(snake_case__ ) A = True A = u return visited[t] def _snake_case ( snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : List[str] ): # This array is filled by BFS and to store path A = [-1] * (len(snake_case__ )) A = 0 while bfs(snake_case__ , snake_case__ , snake_case__ , snake_case__ ): A = float('Inf' ) A = sink while s != source: # Find the minimum value in select path A = min(snake_case__ , graph[parent[s]][s] ) A = parent[s] max_flow += path_flow A = sink while v != source: A = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow A = parent[v] return max_flow _lowercase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] _lowercase , _lowercase = 0, 5 print(ford_fulkerson(graph, source, sink))
74
'''simple docstring'''


def gcd(a: int, b: int) -> int:
    '''simple docstring'''
    # Iterative Euclidean algorithm.
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # Extended Euclidean algorithm: track Bézout coefficients alongside remainders.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
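# Usage example: 5 is the inverse of 3 modulo 7, since (3 * 5) % 7 == 1, and a
# non-coprime pair has no inverse at all.
assert find_mod_inverse(3, 7) == 5
try:
    find_mod_inverse(4, 8)
except ValueError:
    pass  # gcd(4, 8) != 1, so no inverse exists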
272
0
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker a_ : Dict = """CompVis/stable-diffusion-v1-1""" a_ : str = """CompVis/stable-diffusion-v1-2""" a_ : Any = """CompVis/stable-diffusion-v1-3""" a_ : str = """CompVis/stable-diffusion-v1-4""" class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ): """simple docstring""" super()._init_() lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =StableDiffusionPipeline( vae=lowerCAmelCase, text_encoder=lowerCAmelCase, tokenizer=lowerCAmelCase, unet=lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, requires_safety_checker=lowerCAmelCase, ) self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea ) @property def lowercase__ ( self ): """simple docstring""" return {k: getattr(self, lowerCAmelCase ) for k in self.config.keys() if not k.startswith('''_''' )} def lowercase__ ( self, lowerCAmelCase = "auto" ): """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" self.enable_attention_slicing(lowerCAmelCase ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, 
generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" return self.pipea( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) @torch.no_grad() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ='''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(lowerCAmelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint v1.2 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint 
v1.3 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get first result from Stable Diffusion Checkpoint v1.4 lowerCamelCase_ =self.textaimg_sda_a( prompt=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, **lowerCAmelCase, ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
75
'''simple docstring'''
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    '''simple docstring'''

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        # Two dense layers with a SiLU in between project the sinusoidal features.
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    '''simple docstring'''

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
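# Usage example: embed a batch of four timesteps into 32-dim sinusoidal features
# with the function above (shape check only; values depend on the frequency schedule).
timesteps = jnp.array([0, 1, 10, 100])
emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
assert emb.shape == (4, 32)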
272
0
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand a_ = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) a_ = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) a_ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) a_ = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) a_ = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) a_ = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) a_ = ( ('JH AH TH KH QH', 23), 
('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = randrange(len(_a)), randrange(len(_a)) SCREAMING_SNAKE_CASE : List[str] = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)] SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowerCamelCase__ ( _a = 100): return (generate_random_hand() for _ in range(_a)) @pytest.mark.parametrize("hand, expected" , _a) def lowerCamelCase__ ( _a , _a): assert PokerHand(_a)._is_flush() == expected @pytest.mark.parametrize("hand, expected" , _a) def lowerCamelCase__ ( _a , _a): assert PokerHand(_a)._is_straight() == expected @pytest.mark.parametrize("hand, expected, card_values" , _a) def lowerCamelCase__ ( _a , _a , _a): SCREAMING_SNAKE_CASE : int = PokerHand(_a) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("hand, expected" , _a) def lowerCamelCase__ ( _a , _a): assert PokerHand(_a)._is_same_kind() == expected @pytest.mark.parametrize("hand, expected" , _a) def lowerCamelCase__ ( _a , _a): assert PokerHand(_a)._hand_type == expected @pytest.mark.parametrize("hand, other, expected" , _a) def lowerCamelCase__ ( _a , _a , _a): assert PokerHand(_a).compare_with(PokerHand(_a)) == expected @pytest.mark.parametrize("hand, other, expected" , generate_random_hands()) def lowerCamelCase__ ( _a , _a , _a): assert PokerHand(_a).compare_with(PokerHand(_a)) == expected def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : int = [PokerHand(_a) for hand in SORTED_HANDS] SCREAMING_SNAKE_CASE : List[Any] = poker_hands.copy() shuffle(_a) SCREAMING_SNAKE_CASE : List[str] = chain(sorted(_a)) for index, hand in enumerate(_a): assert hand == poker_hands[index] def lowerCamelCase__ ( ): # Test that five high straights are compared correctly. SCREAMING_SNAKE_CASE : Optional[int] = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")] pokerhands.sort(reverse=_a) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowerCamelCase__ ( ): # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. SCREAMING_SNAKE_CASE : Tuple = PokerHand("2C 4S AS 3D 5C") SCREAMING_SNAKE_CASE : Union[str, Any] = True SCREAMING_SNAKE_CASE : Dict = [5, 4, 3, 2, 14] for _ in range(10): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowerCamelCase__ ( ): # Problem number 54 from Project Euler # Testing from poker_hands.txt file SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : Optional[Any] = os.path.abspath(os.path.dirname(_a)) SCREAMING_SNAKE_CASE : int = os.path.join(_a , "poker_hands.txt") with open(_a) as file_hand: for line in file_hand: SCREAMING_SNAKE_CASE : List[str] = line[:14].strip() SCREAMING_SNAKE_CASE : Union[str, Any] = line[15:].strip() SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = PokerHand(_a), PokerHand(_a) SCREAMING_SNAKE_CASE : List[Any] = player.compare_with(_a) if output == "Win": answer += 1 assert answer == 376
76
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowercase = { '''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NezhaForNextSentencePrediction''', '''NezhaForMaskedLM''', '''NezhaForPreTraining''', '''NezhaForMultipleChoice''', '''NezhaForQuestionAnswering''', '''NezhaForSequenceClassification''', '''NezhaForTokenClassification''', '''NezhaModel''', '''NezhaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
272
0
"""simple docstring""" from random import randint, random def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : bool = False , _lowerCAmelCase : bool = False , _lowerCAmelCase : int = 5 , ): '''simple docstring''' lowercase__ : str = [[-1] * number_of_cells] # Create a highway without any car lowercase__ : Union[str, Any] = 0 lowercase__ : List[Any] = max(_lowerCAmelCase , 0 ) while i < number_of_cells: lowercase__ : Tuple = ( randint(0 , _lowerCAmelCase ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Tuple = 0 lowercase__ : List[Any] = highway_now[car_index + 1 :] for cell in range(len(_lowerCAmelCase ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(_lowerCAmelCase , -1 ) def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : float , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : List[Any] = len(_lowerCAmelCase ) # Beforce calculations, the highway is empty lowercase__ : Any = [-1] * number_of_cells for car_index in range(_lowerCAmelCase ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed lowercase__ : Dict = min(highway_now[car_index] + 1 , _lowerCAmelCase ) # Number of empty cell before the next car lowercase__ : Any = get_distance(_lowerCAmelCase , _lowerCAmelCase ) - 1 # We can't have the car causing an accident lowercase__ : Any = min(next_highway[car_index] , _lowerCAmelCase ) if random() < probability: # Randomly, a driver will slow down lowercase__ : Tuple = max(next_highway[car_index] - 1 , 0 ) return next_highway def a_ ( _lowerCAmelCase : list , _lowerCAmelCase : int , _lowerCAmelCase : float , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Any = len(highway[0] ) for i in range(_lowerCAmelCase ): lowercase__ : List[Any] = update(highway[i] , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Union[str, Any] = [-1] * number_of_cells for car_index in range(_lowerCAmelCase ): lowercase__ : Any = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) lowercase__ : Optional[int] = (car_index + speed) % number_of_cells # Commit the change of position lowercase__ : Any = speed highway.append(_lowerCAmelCase ) return highway if __name__ == "__main__": import doctest doctest.testmod()
77
'''simple docstring'''
from math import sqrt


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            # the shortest path over the cuboid surface is an integer iff
            # (a + b)^2 + c^2 is a perfect square, with c = max_cuboid_size
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f'{solution() = }')
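# Usage example, grounded in the Project Euler 86 statement: M = 100 is the least
# longest-side for which the count of integer-shortest-path cuboids first exceeds
# two thousand.
assert solution(2000) == 100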
272
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""} class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """openai-gpt""" __UpperCamelCase = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self :Optional[int] , lowercase_ :Optional[int]=4_04_78 , lowercase_ :List[Any]=5_12 , lowercase_ :List[str]=7_68 , lowercase_ :int=12 , lowercase_ :Dict=12 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :List[str]=0.1 , lowercase_ :Optional[Any]=1E-5 , lowercase_ :Optional[int]=0.02 , lowercase_ :Optional[Any]="cls_index" , lowercase_ :List[str]=True , lowercase_ :List[str]=None , lowercase_ :str=True , lowercase_ :int=0.1 , **lowercase_ :List[Any] , ) -> str: UpperCAmelCase = vocab_size UpperCAmelCase = n_positions UpperCAmelCase = n_embd UpperCAmelCase = n_layer UpperCAmelCase = n_head UpperCAmelCase = afn UpperCAmelCase = resid_pdrop UpperCAmelCase = embd_pdrop UpperCAmelCase = attn_pdrop UpperCAmelCase = layer_norm_epsilon UpperCAmelCase = initializer_range UpperCAmelCase = summary_type UpperCAmelCase = summary_use_proj UpperCAmelCase = summary_activation UpperCAmelCase = summary_first_dropout UpperCAmelCase = summary_proj_to_labels super().__init__(**lowercase_ )
78
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowercase = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
272
0
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCamelCase_ = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def __lowercase ( __lowercase , __lowercase ) -> tuple[str, float]: '''simple docstring''' _A = len([g for position, g in enumerate(__lowercase ) if g == main_target[position]] ) return (item, float(__lowercase )) def __lowercase ( __lowercase , __lowercase ) -> tuple[str, str]: '''simple docstring''' _A = random.randint(0 , len(__lowercase ) - 1 ) _A = parent_a[:random_slice] + parent_a[random_slice:] _A = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __lowercase ( __lowercase , __lowercase ) -> str: '''simple docstring''' _A = list(__lowercase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: _A = random.choice(__lowercase ) return "".join(__lowercase ) def __lowercase ( __lowercase , __lowercase , __lowercase , ) -> list[str]: '''simple docstring''' _A = [] # Generate more children proportionally to the fitness score. _A = int(parent_a[1] * 100 ) + 1 _A = 10 if child_n >= 10 else child_n for _ in range(__lowercase ): _A = population_score[random.randint(0 , __lowercase )][0] _A , _A = crossover(parent_a[0] , __lowercase ) # Append new string to the population list. pop.append(mutate(__lowercase , __lowercase ) ) pop.append(mutate(__lowercase , __lowercase ) ) return pop def __lowercase ( __lowercase , __lowercase , __lowercase = True ) -> tuple[int, int, str]: '''simple docstring''' if N_POPULATION < N_SELECTED: _A = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__lowercase ) # Verify that the target contains no genes besides the ones inside genes variable. _A = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _A = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__lowercase ) # Generate random starting population. _A = [] for _ in range(__lowercase ): population.append("".join([random.choice(__lowercase ) for i in range(len(__lowercase ) )] ) ) # Just some logs to know what the algorithms is doing. _A , _A = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__lowercase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. _A = [evaluate(__lowercase , __lowercase ) for item in population] # Check if there is a matching evolution. 
_A = sorted(__lowercase , key=lambda __lowercase : x[1] , reverse=__lowercase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. _A = population[: int(N_POPULATION / 3 )] population.clear() population.extend(__lowercase ) # Normalize population score to be between 0 and 1. _A = [ (item, score / len(__lowercase )) for item, score in population_score ] # This is selection for i in range(__lowercase ): population.extend(select(population_score[int(__lowercase )] , __lowercase , __lowercase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__lowercase ) > N_POPULATION: break if __name__ == "__main__": lowerCamelCase_ = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCamelCase_ = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
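# A de-mangled sketch of the two genetic operators used by the record above, under
# the same conventions (strings as individuals, a fixed mutation probability as the
# gene-flip chance); the names below are editorial, chosen for readability.
import random


def crossover_pair(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Swap the tails of the two parents at a random cut point.
    cut = random.randint(0, len(parent_1) - 1)
    return (parent_1[:cut] + parent_2[cut:], parent_2[:cut] + parent_1[cut:])


def mutate_child(child: str, genes: list[str], probability: float = 0.4) -> str:
    # With the given probability, replace one random position with a random gene.
    chars = list(child)
    if random.uniform(0, 1) < probability:
        chars[random.randint(0, len(chars) - 1)] = random.choice(genes)
    return "".join(chars)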
79
'''simple docstring''' import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class a__( unittest.TestCase ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=[0.5, 0.5, 0.5] , __lowerCAmelCase=[0.5, 0.5, 0.5] , ): """simple docstring""" lowerCAmelCase = size if size is not None else {"""height""": 18, """width""": 18} lowerCAmelCase = parent lowerCAmelCase = batch_size lowerCAmelCase = num_channels lowerCAmelCase = image_size lowerCAmelCase = min_resolution lowerCAmelCase = max_resolution lowerCAmelCase = do_resize lowerCAmelCase = size lowerCAmelCase = do_normalize lowerCAmelCase = image_mean lowerCAmelCase = image_std def a_ ( self): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = DPTImageProcessor if is_vision_available() else None def a_ ( self): """simple docstring""" lowerCAmelCase = DPTImageProcessingTester(self) @property def a_ ( self): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__lowerCAmelCase , """image_mean""")) self.assertTrue(hasattr(__lowerCAmelCase , """image_std""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_normalize""")) self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""")) self.assertTrue(hasattr(__lowerCAmelCase , """size""")) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18}) lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42}) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , Image.Image) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" 
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , np.ndarray) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def a_ ( self): """simple docstring""" lowerCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase) for image in image_inputs: self.assertIsInstance(__lowerCAmelCase , torch.Tensor) # Test not batched input lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched lowerCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , )
272
0
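The image-processor test cell closing the row above asserts one invariant three times (for PIL, numpy and torch inputs): the processor maps arbitrary (H, W, C) images to a channel-first (N, C, H, W) batch at the configured 18x18 size. A minimal self-contained sketch of that invariant using only numpy and PIL (the helper name and resize logic here are illustrative, not the transformers implementation):

import numpy as np
from PIL import Image


def to_batched_tensor(images, size=(18, 18)):
    # Resize each PIL image, scale to [0, 1], and stack channel-first: (N, C, H, W).
    arrays = [np.asarray(img.resize(size), dtype=np.float32) / 255.0 for img in images]
    return np.stack([a.transpose(2, 0, 1) for a in arrays])


images = [Image.fromarray(np.random.randint(0, 256, (32, 40, 3), dtype=np.uint8)) for _ in range(4)]
batch = to_batched_tensor(images)
assert batch.shape == (4, 3, 18, 18)  # batch, channels, height, width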
'''simple docstring'''

from __future__ import annotations


def is_palindrome(n) -> bool:
    '''simple docstring'''
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1000000) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, limit):
        # count numbers that are palindromic in base 10 and in base 2
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
80
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def snake_case__ ( _A: Union[str, Any] , _A: Tuple , _A: Any=1e-12 ) -> str: '''simple docstring''' lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T lowerCAmelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(_A , axis=1 ) , a_min=_A ) ).T return jnp.matmul(_A , norm_emb_a.T ) class a__( nn.Module ): '''simple docstring''' UpperCAmelCase_ : CLIPConfig UpperCAmelCase_ : jnp.dtype = jnp.floataa def a_ ( self): """simple docstring""" lowerCAmelCase = FlaxCLIPVisionModule(self.config.vision_config) lowerCAmelCase = nn.Dense(self.config.projection_dim , use_bias=__lowerCAmelCase , dtype=self.dtype) lowerCAmelCase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim)) lowerCAmelCase = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) lowerCAmelCase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,)) lowerCAmelCase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self , __lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.vision_model(__lowerCAmelCase)[1] lowerCAmelCase = self.visual_projection(__lowerCAmelCase) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.special_care_embeds) lowerCAmelCase = jax_cosine_distance(__lowerCAmelCase , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowerCAmelCase = 0.0 lowerCAmelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=__lowerCAmelCase) # Use a lower threshold if an image has any special care concept lowerCAmelCase = is_special_care * 0.01 lowerCAmelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowerCAmelCase = jnp.round(__lowerCAmelCase , 3) lowerCAmelCase = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = CLIPConfig UpperCAmelCase_ : Any = '''clip_input''' UpperCAmelCase_ : List[str] = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" if input_shape is None: lowerCAmelCase = (1, 224, 224, 3) lowerCAmelCase = self.module_class(config=__lowerCAmelCase , dtype=__lowerCAmelCase , **__lowerCAmelCase) super().__init__(__lowerCAmelCase , __lowerCAmelCase , input_shape=__lowerCAmelCase , seed=__lowerCAmelCase , dtype=__lowerCAmelCase , _do_init=_do_init) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = jax.random.normal(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase , lowerCAmelCase = jax.random.split(__lowerCAmelCase) lowerCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng} lowerCAmelCase = self.module.init(__lowerCAmelCase , __lowerCAmelCase)["""params"""] return random_params def __call__( self 
, __lowerCAmelCase , __lowerCAmelCase = None , ): """simple docstring""" lowerCAmelCase = jnp.transpose(__lowerCAmelCase , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(__lowerCAmelCase , dtype=jnp.floataa) , rngs={} , )
272
0
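For the double-base palindrome cell in the row above (Project Euler 36): the binary spelling comes from bin(i).split("b")[1], i.e. the digits after the "0b" prefix. A quick check of that trick and of a known double-base palindrome; the quoted final answer is the published result for this problem, not derived here:

# bin() prefixes with '0b'; splitting on 'b' keeps just the digits.
assert bin(585).split("b")[1] == "1001001001"
# 585 reads the same both ways in base 10 and in base 2.
assert str(585) == str(585)[::-1] and "1001001001" == "1001001001"[::-1]
# Known Project Euler 36 result: solution(1_000_000) == 872187.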
"""simple docstring""" import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCamelCase_ : Optional[Any] = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow("""""", """|""", """|"""), datarow=DataRow("""""", """|""", """|"""), padding=1, with_header_hide=None, ) lowerCamelCase_ : List[Any] = [] lowerCamelCase_ : Optional[Any] = [] lowerCamelCase_ : Optional[Any] = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} lowerCamelCase_ : Dict = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results', """emoji""": True, }, } ] lowerCamelCase_ : List[Any] = 0 for log in Path().glob("""*.log"""): lowerCamelCase_ : List[Any] = 0 with open(log, """r""") as f: for line in f: lowerCamelCase_ : List[Any] = json.loads(line) if line.get("""nodeid""", """""") != "": lowerCamelCase_ : Any = line["""nodeid"""] if line.get("""duration""", None) is not None: lowerCamelCase_ : List[Any] = F'{line["duration"]:.4f}' if line.get("""outcome""", """""") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("""_""")[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCamelCase_ : List[str] = [] log.unlink() lowerCamelCase_ : List[Any] = """""" lowerCamelCase_ : List[Any] = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" lowerCamelCase_ : Optional[Any] = [] lowerCamelCase_ : Optional[int] = {} for test in failed_tests: lowerCamelCase_ : Union[str, Any] = test[0].split("""::""") lowerCamelCase_ : Union[str, Any] = data[0].split("""/""")[-1] if data[0] not in filesafailed: lowerCamelCase_ : str = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCamelCase_ : Optional[Any] = [test[0] for test in failed_table] lowerCamelCase_ : str = list(set(files)) # Count number of instances in failed_tests lowerCamelCase_ : int = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCamelCase_ : str = tabulate( table, headers=["""Test Location""", """Num Failed"""], tablefmt=hf_table_format, stralign="""right""", ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_0_0_0: lowerCamelCase_ : int = """Too many failed tests, please see the full report in the Action results.""" lowerCamelCase_ : str = len(err) + 1_0 lowerCamelCase_ : Optional[Any] = message[: 3_0_0_0 - offset] + F'\n...\n```\n{err}' print(F'### {message}') else: lowerCamelCase_ : Optional[Any] = """No failed tests! 🤗""" print(F'## {message}') payload.append(no_error_payload) if os.environ.get("""TEST_TYPE""", """""") != "": from slack_sdk import WebClient lowerCamelCase_ : List[Any] = WebClient(token=os.environ["""SLACK_API_TOKEN"""]) if message != "No failed tests! 
🤗": lowerCamelCase_ : Any = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) lowerCamelCase_ : Dict = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } payload.append(action_button) lowerCamelCase_ : Optional[Any] = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}', } ], } payload.append(date_report) lowerCamelCase_ : Optional[int] = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload) lowerCamelCase_ : Union[str, Any] = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCamelCase_ : int = """""" for i, row in enumerate(test_failures): if row[0] != test_class: lowerCamelCase_ : str = row[0] else: lowerCamelCase_ : str = """""" lowerCamelCase_ : Dict = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```', }, } client.chat_postMessage( channel="""#accelerate-ci-daily""", thread_ts=ts, blocks=[payload], )
81
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Dict = MvpTokenizer UpperCAmelCase_ : Optional[Any] = MvpTokenizerFast UpperCAmelCase_ : str = True UpperCAmelCase_ : List[Any] = filter_roberta_detectors def a_ ( self): """simple docstring""" super().setUp() lowerCAmelCase = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowerCAmelCase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase)))) lowerCAmelCase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowerCAmelCase = {"""unk_token""": """<unk>"""} lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(__lowerCAmelCase) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(__lowerCAmelCase)) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , **__lowerCAmelCase): """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase): """simple docstring""" return "lower newer", "lower newer" @cached_property def a_ ( self): """simple docstring""" return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""") @cached_property def a_ ( self): """simple docstring""" return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""") @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowerCAmelCase = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , max_length=len(__lowerCAmelCase) , padding=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) lowerCAmelCase = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase) # Test that special tokens are reset @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , return_tensors="""pt""") # check if input_ids are returned and no labels self.assertIn("""input_ids""" , __lowerCAmelCase) self.assertIn("""attention_mask""" , __lowerCAmelCase) 
self.assertNotIn("""labels""" , __lowerCAmelCase) self.assertNotIn("""decoder_attention_mask""" , __lowerCAmelCase) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(text_target=__lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""") self.assertEqual(32 , targets["""input_ids"""].shape[1]) @require_torch def a_ ( self): """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""") self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase) self.assertEqual(batch.input_ids.shape , (2, 1024)) @require_torch def a_ ( self): """simple docstring""" lowerCAmelCase = ["""A long paragraph for summarization."""] lowerCAmelCase = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowerCAmelCase = tokenizer(__lowerCAmelCase , text_target=__lowerCAmelCase , return_tensors="""pt""") lowerCAmelCase = inputs["""input_ids"""] lowerCAmelCase = inputs["""labels"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) def a_ ( self): """simple docstring""" pass def a_ ( self): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = """A, <mask> AllenNLP sentence.""" lowerCAmelCase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) lowerCAmelCase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( __lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
272
0
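The nightly-report script in the row above renders its Slack tables through tabulate's TableFormat/DataRow machinery rather than a built-in table style. A minimal sketch of just that formatting piece, with made-up row data and no Slack client:

from tabulate import DataRow, TableFormat, tabulate

# Pipe-delimited rows with no rule lines, mirroring the script's hf_table_format.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

rows = [["tests/test_a.py", 2], ["tests/test_b.py", 1]]
print(tabulate(rows, headers=["Test Location", "Num Failed"], tablefmt=hf_table_format, stralign="right"))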
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() A__ = logging.get_logger(__name__) def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): _lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): _lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 _lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] _lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case )-1}' ) if "norm" in key: _lowerCAmelCase = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 _lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] _lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case )-1}' ) if "layer_norm1" in key: _lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: _lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 _lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )] _lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(snake_case )-1}' ) if "attn.q" in key: _lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: _lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: _lowerCAmelCase = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: _lowerCAmelCase = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: _lowerCAmelCase = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: _lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: _lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) _lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 _lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )] _lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case )-1}' ) if "bot_conv" in key: _lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: _lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: _lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: _lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: _lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: _lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: _lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): _lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" ) _lowerCAmelCase = value return new_state_dict def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" for i in range(config.num_encoder_blocks ): for j in 
range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) _lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) _lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict _lowerCAmelCase = kv_weight[ : config.hidden_sizes[i], : ] _lowerCAmelCase = kv_bias[: config.hidden_sizes[i]] _lowerCAmelCase = kv_weight[ config.hidden_sizes[i] :, : ] _lowerCAmelCase = kv_bias[config.hidden_sizes[i] :] def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return image @torch.no_grad() def _UpperCAmelCase ( snake_case , snake_case , snake_case=False , snake_case=None ): """simple docstring""" _lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) _lowerCAmelCase = GLPNImageProcessor() # prepare image _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict _lowerCAmelCase = torch.load(snake_case , map_location=torch.device("""cpu""" ) ) # rename keys _lowerCAmelCase = rename_keys(snake_case ) # key and value matrices need special treatment read_in_k_v(snake_case , snake_case ) # create HuggingFace model and load state dict _lowerCAmelCase = GLPNForDepthEstimation(snake_case ) model.load_state_dict(snake_case ) model.eval() # forward pass _lowerCAmelCase = model(snake_case ) _lowerCAmelCase = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: _lowerCAmelCase = torch.tensor( [[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] ) elif "kitti" in model_name: _lowerCAmelCase = torch.tensor( [[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] ) else: raise ValueError(F'Unknown model name: {model_name}' ) _lowerCAmelCase = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=snake_case , ) if __name__ == "__main__": A__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) parser.add_argument( """--model_name""", default="""glpn-kitti""", type=str, help="""Name of the model in case you're pushing to the hub.""", ) A__ = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, 
args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
82
'''simple docstring''' import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class a__( enum.Enum ): '''simple docstring''' UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 2 @add_end_docstrings(lowerCAmelCase__ ) class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = ''' In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> ''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" super().__init__(*__lowerCAmelCase , **__lowerCAmelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == """tf""" else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. lowerCAmelCase = None if self.model.config.prefix is not None: lowerCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. lowerCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._sanitize_parameters(prefix=__lowerCAmelCase , **self._forward_params) lowerCAmelCase = {**self._preprocess_params, **preprocess_params} lowerCAmelCase = {**self._forward_params, **forward_params} def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = {} if prefix is not None: lowerCAmelCase = prefix if prefix: lowerCAmelCase = self.tokenizer( __lowerCAmelCase , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prefix_inputs["""input_ids"""].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" """ [None, 'hole']""") lowerCAmelCase = handle_long_generation preprocess_params.update(__lowerCAmelCase) lowerCAmelCase = generate_kwargs lowerCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_full_text`""") if return_tensors is not None: raise ValueError("""`return_full_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError("""`return_text` is mutually exclusive with `return_tensors`""") lowerCAmelCase = ReturnType.TENSORS if return_type is not None: lowerCAmelCase = return_type if clean_up_tokenization_spaces is not None: lowerCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: lowerCAmelCase = self.tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase) if len(__lowerCAmelCase) > 1: warnings.warn( """Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of""" """ the stop sequence will be used as the stop sequence string in the interim.""") lowerCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({"""add_space_before_punct_symbol""": True}) return super()._parse_and_tokenize(*__lowerCAmelCase , **__lowerCAmelCase) def __call__( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return super().__call__(__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase="" , __lowerCAmelCase=None , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = self.tokenizer( prefix + prompt_text , padding=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_tensors=self.framework) lowerCAmelCase = prompt_text if handle_long_generation == "hole": lowerCAmelCase = inputs["""input_ids"""].shape[-1] if "max_new_tokens" in generate_kwargs: lowerCAmelCase = generate_kwargs["""max_new_tokens"""] else: lowerCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError("""We cannot infer how many new tokens are expected""") if cur_len + new_tokens > self.tokenizer.model_max_length: lowerCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( """We cannot use `hole` to handle this generation the number of desired tokens exceeds the""" """ models max length""") lowerCAmelCase = inputs["""input_ids"""][:, -keep_length:] if "attention_mask" in inputs: lowerCAmelCase = inputs["""attention_mask"""][:, -keep_length:] return inputs def a_ ( self , __lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = model_inputs["""input_ids"""] lowerCAmelCase = model_inputs.get("""attention_mask""" , __lowerCAmelCase) # Allow empty prompts if input_ids.shape[1] == 0: lowerCAmelCase = None lowerCAmelCase = None lowerCAmelCase = 1 else: lowerCAmelCase = input_ids.shape[0] lowerCAmelCase = model_inputs.pop("""prompt_text""") # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
lowerCAmelCase = generate_kwargs.pop("""prefix_length""" , 0) if prefix_length > 0: lowerCAmelCase = """max_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].max_new_tokens is not None ) if not has_max_new_tokens: lowerCAmelCase = generate_kwargs.get("""max_length""") or self.model.config.max_length generate_kwargs["max_length"] += prefix_length lowerCAmelCase = """min_new_tokens""" in generate_kwargs or ( """generation_config""" in generate_kwargs and generate_kwargs["""generation_config"""].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL lowerCAmelCase = self.model.generate(input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = generated_sequence.shape[0] if self.framework == "pt": lowerCAmelCase = generated_sequence.reshape(__lowerCAmelCase , out_b // in_b , *generated_sequence.shape[1:]) elif self.framework == "tf": lowerCAmelCase = tf.reshape(__lowerCAmelCase , (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a_ ( self , __lowerCAmelCase , __lowerCAmelCase=ReturnType.FULL_TEXT , __lowerCAmelCase=True): """simple docstring""" lowerCAmelCase = model_outputs["""generated_sequence"""][0] lowerCAmelCase = model_outputs["""input_ids"""] lowerCAmelCase = model_outputs["""prompt_text"""] lowerCAmelCase = generated_sequence.numpy().tolist() lowerCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: lowerCAmelCase = {"""generated_token_ids""": sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text lowerCAmelCase = self.tokenizer.decode( __lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: lowerCAmelCase = 0 else: lowerCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase , )) if return_type == ReturnType.FULL_TEXT: lowerCAmelCase = prompt_text + text[prompt_length:] else: lowerCAmelCase = text[prompt_length:] lowerCAmelCase = {"""generated_text""": all_text} records.append(__lowerCAmelCase) return records
272
0
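One non-mechanical step in the GLPN conversion above is read_in_k_v: the original checkpoint stores a fused key/value projection, which must be sliced into separate key and value matrices along the output dimension. A standalone sketch of that slicing, with an illustrative hidden size:

import torch

hidden = 64
kv_weight = torch.randn(2 * hidden, hidden)  # fused [key; value] projection
kv_bias = torch.randn(2 * hidden)

# The first `hidden` output rows are the key projection, the rest the value projection.
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
k_bias, v_bias = kv_bias[:hidden], kv_bias[hidden:]

assert k_weight.shape == v_weight.shape == (hidden, hidden)
assert torch.equal(torch.cat([k_weight, v_weight], dim=0), kv_weight)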
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class lowercase__ ( ABC ):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        '''simple docstring'''
        raise NotImplementedError()
83
'''simple docstring'''


def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
272
0
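For the z-function cell in the row above: z_result[i] is the length of the longest common prefix of the string and its suffix starting at i, which is exactly what makes the pattern-count trick (concatenate pattern + text, count z-values >= len(pattern)) work. A brute-force cross-check of the definition on a small string:

def lcp(a: str, b: str) -> int:
    # Length of the longest common prefix of a and b.
    n = 0
    for x, y in zip(a, b):
        if x != y:
            break
        n += 1
    return n


s = "aaabaab"
z_brute = [0] + [lcp(s, s[i:]) for i in range(1, len(s))]  # index 0 is 0 by convention
assert z_brute == [0, 2, 1, 0, 2, 1, 0]
# Overlapping occurrences of the prefix "aa": index 0 (trivially) plus every
# index with z >= 2, i.e. positions 0, 1 and 4 -> three matches.
assert [i for i in range(1, len(s)) if z_brute[i] >= 2] == [1, 4]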
"""simple docstring""" from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ :str = prime_factors(lowercase__ ) if is_square_free(lowercase__ ): return -1 if len(lowercase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
84
'''simple docstring''' from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : str = '''EncodecFeatureExtractor''' UpperCAmelCase_ : Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self , __lowerCAmelCase , __lowerCAmelCase): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase) lowerCAmelCase = self.feature_extractor lowerCAmelCase = False def a_ ( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True): """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=__lowerCAmelCase , language=__lowerCAmelCase , no_timestamps=__lowerCAmelCase) def __call__( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase) lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""sampling_rate""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""text""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""") if text is not None: lowerCAmelCase = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase) if audio is not None: lowerCAmelCase = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase) if audio is None: return inputs elif text is None: return audio_inputs else: lowerCAmelCase = audio_inputs["""input_values"""] if "padding_mask" in audio_inputs: lowerCAmelCase = audio_inputs["""padding_mask"""] return inputs def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" lowerCAmelCase = kwargs.pop("""audio""" , __lowerCAmelCase) lowerCAmelCase = kwargs.pop("""padding_mask""" , __lowerCAmelCase) if len(__lowerCAmelCase) > 0: lowerCAmelCase = args[0] lowerCAmelCase = args[1:] if audio_values is not None: return self._decode_audio(__lowerCAmelCase , padding_mask=__lowerCAmelCase) else: return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , *__lowerCAmelCase , **__lowerCAmelCase): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase) def a_ ( self , __lowerCAmelCase , __lowerCAmelCase = None): """simple docstring""" lowerCAmelCase = to_numpy(__lowerCAmelCase) lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = audio_values.shape if padding_mask is None: return list(__lowerCAmelCase) lowerCAmelCase = to_numpy(__lowerCAmelCase) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) lowerCAmelCase = seq_len - padding_mask.shape[-1] lowerCAmelCase = 1 - self.feature_extractor.padding_value lowerCAmelCase = np.pad(__lowerCAmelCase , ((0, 0), (0, difference)) , """constant""" , constant_values=__lowerCAmelCase) lowerCAmelCase = audio_values.tolist() for i in range(__lowerCAmelCase): lowerCAmelCase = np.asarray(audio_values[i])[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] lowerCAmelCase = sliced_audio.reshape(__lowerCAmelCase , -1) return audio_values
272
0
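For the Möbius-function cell in the row above: mobius(n) is 1 when n is square-free with an even number of prime factors, -1 with an odd number, and 0 when some prime divides n twice. The maths.* imports in that cell appear to be repo-internal helpers, so here is a dependency-free reference implementation (trial division) to check the first few values:

def mobius_ref(n: int) -> int:
    # 0 as soon as a squared prime divides n; otherwise the parity of the factor count.
    factors = 0
    d = 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:
                return 0  # squared prime factor
            factors += 1
        else:
            d += 1
    if n > 1:
        factors += 1
    return -1 if factors % 2 else 1


assert [mobius_ref(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]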
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( lowercase_ , unittest.TestCase ): lowerCAmelCase_ : List[str] = LongformerTokenizer lowerCAmelCase_ : Optional[Any] = True lowerCAmelCase_ : List[str] = LongformerTokenizerFast lowerCAmelCase_ : Any = True def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] snake_case_ = dict(zip(a__ , range(len(a__ ) ) ) ) snake_case_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] snake_case_ = {"unk_token": "<unk>"} snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(a__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(a__ ) ) def lowerCAmelCase__ ( self , **a__ ) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ ) def lowerCAmelCase__ ( self , **a__ ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a__ ) def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]: '''simple docstring''' snake_case_ = "lower newer" snake_case_ = "lower newer" return input_text, output_text def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' snake_case_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ = "lower newer" snake_case_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] snake_case_ = tokenizer.tokenize(a__ ) # , add_prefix_space=True) self.assertListEqual(a__ , a__ ) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ ) def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' snake_case_ = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=a__ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=a__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" ) snake_case_ = tokenizer.encode("sequence builders" , add_special_tokens=a__ ) snake_case_ = tokenizer.encode("multi-sequence build" , add_special_tokens=a__ ) snake_case_ = tokenizer.encode( "sequence builders" , add_special_tokens=a__ , add_prefix_space=a__ ) snake_case_ = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=a__ , add_prefix_space=a__ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ ) snake_case_ = tokenizer.build_inputs_with_special_tokens(a__ , a__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = self.get_tokenizer() snake_case_ = "Encode this sequence." snake_case_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(a__ , a__ ) snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(a__ , a__ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) snake_case_ = tokenizer.encode(a__ , add_special_tokens=a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(a__ , a__ ) # Testing spaces after special tokens snake_case_ = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(a__ , lstrip=a__ , rstrip=a__ )} ) # mask token has a left space snake_case_ = tokenizer.convert_tokens_to_ids(a__ ) snake_case_ = "Encode <mask> sequence" snake_case_ = "Encode <mask>sequence" snake_case_ = tokenizer.encode(a__ ) snake_case_ = encoded.index(a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(a__ , a__ ) snake_case_ = tokenizer.encode(a__ ) snake_case_ = encoded.index(a__ ) snake_case_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(a__ , a__ ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' pass def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(a__ , **a__ ) snake_case_ = self.tokenizer_class.from_pretrained(a__ , **a__ ) snake_case_ = "A, <mask> AllenNLP sentence." 
snake_case_ = tokenizer_r.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ ) snake_case_ = tokenizer_p.encode_plus(a__ , add_special_tokens=a__ , return_token_type_ids=a__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) snake_case_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) snake_case_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( a__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): snake_case_ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) snake_case_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , a__ ) self.assertEqual(post_processor_state["add_prefix_space"] , a__ ) self.assertEqual(post_processor_state["trim_offsets"] , a__ ) def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): snake_case_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ = F'{text_of_1_token} {text_of_1_token}' snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a__ ) + 1, len(a__ ) + 1 + len(a__ )) , ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a__ )) ) 
self.assertEqual( encoding.offset_mapping[1] , (len(a__ ), len(a__ ) + 1 + len(a__ )) , ) snake_case_ = F' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a__ ) + 1, 1 + len(a__ ) + 1 + len(a__ )) , ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , ) snake_case_ = self.rust_tokenizer_class.from_pretrained( a__ , use_fast=a__ , add_prefix_space=a__ , trim_offsets=a__ ) snake_case_ = tokenizer_r(a__ , return_offsets_mapping=a__ , add_special_tokens=a__ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a__ ), 1 + len(a__ ) + 1 + len(a__ )) , )
85
'''simple docstring''' import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a__( unittest.TestCase ): '''simple docstring''' @property def a_ ( self): """simple docstring""" torch.manual_seed(0) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def a_ ( self): """simple docstring""" lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""").images lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=__lowerCAmelCase)[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2 @slow @require_torch class a__( unittest.TestCase ): '''simple docstring''' def a_ ( self): """simple docstring""" lowerCAmelCase = """google/ddpm-cifar10-32""" lowerCAmelCase = UNetaDModel.from_pretrained(__lowerCAmelCase) lowerCAmelCase = PNDMScheduler() lowerCAmelCase = PNDMPipeline(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase) pndm.to(__lowerCAmelCase) pndm.set_progress_bar_config(disable=__lowerCAmelCase) lowerCAmelCase = torch.manual_seed(0) lowerCAmelCase = pndm(generator=__lowerCAmelCase , output_type="""numpy""").images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
272
0
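A note on the Ġ-prefixed tokens asserted throughout the tokenizer tests above: byte-level BPE remaps unprintable bytes to printable code points, and the space byte 0x20 lands on U+0120 ("Ġ"). That is why add_prefix_space changes how the first word of a sentence tokenizes. A dependency-free check of the convention:

# GPT-2-style byte-level BPE maps the space byte 0x20 to code point 0x120 ('Ġ').
assert chr(0x20 + 0x100) == "\u0120" == "Ġ"
# So " lower" appears in the vocab as "Ġlower", a different token from bare "lower".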
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def __lowerCAmelCase (_UpperCamelCase = "laptop" ): __lowerCAmelCase : Any = F"https://www.amazon.in/laptop/s?k={product}" __lowerCAmelCase : Dict = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } __lowerCAmelCase : List[Any] = BeautifulSoup(requests.get(_UpperCamelCase , headers=_UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles __lowerCAmelCase : Union[str, Any] = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: __lowerCAmelCase : str = item.ha.text __lowerCAmelCase : Tuple = 'https://www.amazon.in/' + item.ha.a['href'] __lowerCAmelCase : Optional[Any] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: __lowerCAmelCase : List[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: __lowerCAmelCase : List[str] = 'Not available' try: __lowerCAmelCase : Any = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: __lowerCAmelCase : Optional[int] = '' try: __lowerCAmelCase : List[Any] = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 100 ) except ValueError: __lowerCAmelCase : str = float('nan' ) except AttributeError: pass __lowerCAmelCase : str = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] __lowerCAmelCase : List[Any] = ' ' __lowerCAmelCase : Tuple = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": lowerCamelCase__ = """headphones""" get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv')
86
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    '''simple docstring'''
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
272
0
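One fiddly piece of the Amazon-scraper cell in the row above is the discount computation, which strips the rupee sign and thousands separators before taking (mrp - price) / mrp. A standalone check of that arithmetic (the sample price strings are made up):

def discount_percent(price: str, mrp: str) -> float:
    # '₹1,299' -> 1299.0, then percentage discount relative to MRP.
    p = float(price.strip("₹").replace(",", ""))
    m = float(mrp.strip("₹").replace(",", ""))
    return (m - p) / m * 100


assert round(discount_percent("₹1,299", "₹2,598"), 1) == 50.0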
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
87
'''simple docstring'''
import os
import string
import sys

ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    '''simple docstring'''
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    '''simple docstring'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
272
0
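A minimal interactive sketch of how these helpers might be driven; it assumes a real TTY, and the loop below is illustrative rather than part of the module:

while True:
    key = get_character()
    # get_character returns a 1-char string (arrows map to codepoints above 255)
    # or KEYMAP["undefined"] for unprintable input.
    if isinstance(key, str) and ord(key) in (KEYMAP["interrupt"], KEYMAP["newline"]):
        break  # stop on Ctrl-C or Enter
    print("pressed:", repr(key))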
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
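For orientation, a sketch of what the lazy-module pattern above buys the caller; the consumer import path is an assumption inferred from the relative `....utils` import:

# At package import time only _import_structure is built; nothing heavy loads.
# from transformers.models.deprecated.mmbt import MMBTConfig   # hypothetical consumer import
# config = MMBTConfig()   # configuration_mmbt is actually imported only on this first access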
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
272
0
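A short usage sketch for the feature extractor above; WhisperFeatureExtractor is the public transformers export, and the waveform here is synthetic:

import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s chunks at 16 kHz
waveform = np.zeros(16000 * 5, dtype=np.float32)  # 5 seconds of silent mono audio
features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000): log-mel frames padded to 30 s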
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ : def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,): _a : Optional[Any] = parent _a : List[Any] = batch_size _a : str = image_size _a : Union[str, Any] = num_channels _a : List[Any] = num_stages _a : Dict = hidden_sizes _a : int = depths _a : Tuple = is_training _a : List[str] = use_labels _a : Dict = intermediate_size _a : int = hidden_act _a : int = num_labels _a : Any = initializer_range _a : Tuple = out_features _a : int = out_indices _a : List[Any] = scope def __lowercase ( self : Dict ): _a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : Union[str, Any] = None if self.use_labels: _a : Tuple = ids_tensor([self.batch_size] ,self.num_labels ) _a : str = self.get_config() return config, pixel_values, labels def __lowercase ( self : Any ): return ConvNextVaConfig( num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ): _a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Any = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ): _a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowercase ( self : str ,_UpperCAmelCase : List[Any] 
,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ): _a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Dict = model(_UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None _a : Tuple = None _a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def __lowercase ( self : Optional[Any] ): _a : Any = self.prepare_config_and_inputs() _a , _a , _a : Union[str, Any] = config_and_inputs _a : Any = {'pixel_values': pixel_values} return config, inputs_dict def __lowercase ( self : str ): _a : Tuple = self.prepare_config_and_inputs() _a , _a , _a : Tuple = config_and_inputs _a : List[Any] = {'pixel_values': pixel_values, 'labels': labels} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : str = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCAmelCase : str = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : int = False lowerCAmelCase : str = False lowerCAmelCase : Optional[Any] = False lowerCAmelCase : List[str] = False lowerCAmelCase : Optional[int] = False def __lowercase ( self : List[Any] ): _a : str = ConvNextVaModelTester(self ) _a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 ) def __lowercase ( self : Optional[Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowercase ( self : str ): return @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' ) def __lowercase ( self : List[Any] ): pass @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' ) def __lowercase ( self : Optional[int] ): pass @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' ) def __lowercase ( self : Any ): pass def __lowercase ( self : List[str] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Any = True if model_class.__name__ in [ *get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase ), ]: continue _a : Optional[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() _a : str = 
self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : Optional[int] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : str ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Optional[int] = False _a : Tuple = True if ( model_class.__name__ in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue _a : Tuple = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() _a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : List[Any] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : List[Any] ): _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = model_class(_UpperCAmelCase ) _a : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Dict = [*signature.parameters.keys()] _a : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_UpperCAmelCase ) def __lowercase ( self : int ): _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def __lowercase ( self : Any ): def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ): _a : Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _a : str = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a : Optional[Any] = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : List[Any] ): _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def __lowercase ( self : int ): for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __lowerCamelCase ( ) -> List[Any]: _a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): @cached_property def __lowercase ( self : Optional[Any] ): return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None @slow def __lowercase ( self : Any ): _a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' 
).to(_UpperCAmelCase ) _a : Optional[int] = self.default_image_processor _a : str = prepare_img() _a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _a : Dict = model(**_UpperCAmelCase ) # verify the logits _a : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,_UpperCAmelCase ) _a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
89
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
272
0
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True if the matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Compute the Rayleigh quotient (v* A v) / (v* v)."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
90
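A small worked check of the functions above (a sketch assuming rayleigh_quotient is in scope); the vector is an exact eigenvector, so the quotient recovers the eigenvalue:

import numpy as np

a = np.array([[2, 0], [0, 5]])
v = np.array([[0], [1]])  # eigenvector of a with eigenvalue 5
print(rayleigh_quotient(a, v))  # [[5.]]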
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig


ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
272
0
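A quick sketch of how a config like this is typically exercised; the override values are illustrative:

config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)  # override two defaults
print(config.hidden_size)   # 384
print(config.num_classes)   # resolved to num_labels through attribute_map
print(config.to_json_string()[:60])  # (de)serialization is inherited from PretrainedConfig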
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase_ : Any = logging.get_logger(__name__) UpperCAmelCase_ : str = { """andreasmadsen/efficient_mlm_m0.40""": ( """https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json""" ), } class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = "roberta-prelayernorm" def __init__( self : Optional[Any] , lowercase_ : List[str]=50265 , lowercase_ : Union[str, Any]=768 , lowercase_ : List[str]=12 , lowercase_ : List[Any]=12 , lowercase_ : List[str]=3072 , lowercase_ : List[Any]="gelu" , lowercase_ : Any=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : List[str]=2 , lowercase_ : Optional[int]=0.02 , lowercase_ : Tuple=1e-12 , lowercase_ : List[str]=1 , lowercase_ : Optional[int]=0 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : int=True , lowercase_ : int=None , **lowercase_ : Tuple , ): '''simple docstring''' super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_) SCREAMING_SNAKE_CASE_ : int = vocab_size SCREAMING_SNAKE_CASE_ : Any = hidden_size SCREAMING_SNAKE_CASE_ : int = num_hidden_layers SCREAMING_SNAKE_CASE_ : int = num_attention_heads SCREAMING_SNAKE_CASE_ : Dict = hidden_act SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings SCREAMING_SNAKE_CASE_ : Any = type_vocab_size SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps SCREAMING_SNAKE_CASE_ : Any = position_embedding_type SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_cache SCREAMING_SNAKE_CASE_ : List[str] = classifier_dropout class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_ : List[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: SCREAMING_SNAKE_CASE_ : Optional[Any] = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ])
91
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
272
0
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of a function with the (multiplicity-aware) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
92
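As a quick sanity check of the solver above (a sketch; assumes newton_raphson is in scope):

root = newton_raphson("x**2 - 2", 1.5)  # sqrt(2) as the positive root of x**2 - 2
print(root)         # ~1.4142135623...
print(root**2 - 2)  # ~0, within the 1e-10 precision target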
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor


logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
272
0
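A sketch of what the deprecation shim above means for callers; both names are real transformers exports:

from transformers import DeformableDetrFeatureExtractor, DeformableDetrImageProcessor

extractor = DeformableDetrFeatureExtractor()  # still works, but emits a FutureWarning
processor = DeformableDetrImageProcessor()    # the drop-in replacement
print(isinstance(extractor, DeformableDetrImageProcessor))  # True: the old class is now just a subclass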
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
93
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
272
0
from __future__ import annotations

import time

import numpy as np


test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, per resource column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> np.ndarray:
        """Resources still free: claim vector minus everything allocated."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the safety algorithm, printing which processes can execute."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
94
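A sketch of driving the class above with the module's sample tables (method names as restored above; any truthy keyword makes main print the tables first):

BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)
# Executes processes one by one while the state stays safe, freeing their resources as it goes.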
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predictions = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predictions)
        decoded_tok = tokenizer.batch_decode(predictions)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
272
0
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (array[first_index] > array[last_index]):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (array[middle_index] > array[last_index]):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
95
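A quick check of the hybrid sort above; sort is the entry point that picks the depth limit and size threshold:

data = [4, 1, 9, 7, 3, 8, 2, 6, 5, 0]
print(sort(data))  # [0.0-style floats aside: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
# Inputs at or below the size threshold (16) go straight to insertion sort; larger ones
# use median-of-3 quicksort and fall back to heap sort once the depth limit is exhausted.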
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class a__( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Tuple = XLMRobertaTokenizer UpperCAmelCase_ : int = XLMRobertaTokenizerFast UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[int] = True def a_ ( self): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self): """simple docstring""" lowerCAmelCase = """<pad>""" lowerCAmelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase) def a_ ( self): """simple docstring""" lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<s>""") self.assertEqual(vocab_keys[1] , """<pad>""") self.assertEqual(vocab_keys[-1] , """<mask>""") self.assertEqual(len(__lowerCAmelCase) , 1002) def a_ ( self): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1002) def a_ ( self): """simple docstring""" lowerCAmelCase = XLMRobertaTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase) lowerCAmelCase = tokenizer.tokenize("""This is a test""") self.assertListEqual(__lowerCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase) self.assertListEqual( __lowerCAmelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def a_ ( self): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase = 
        (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        # SAMPLE_VOCAB is the module-level fixture path defined earlier in this file.
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [
            0, 3293, 83, 10, 4552, 4989, 7986, 678, 10, 5915, 111, 179459, 124850, 4, 6044, 237, 12, 6, 5, 6, 4, 6780,
            705, 15, 1388, 44, 378, 10114, 711, 152, 20, 6, 5, 22376, 642, 1221, 15190, 34153, 450, 5608, 959, 1119,
            57702, 136, 186, 47, 1098, 29367, 47,
            # 4426,  # What fairseq tokenizes from "<unk>": "_<"
            # 3678,  # What fairseq tokenizes from "<unk>": "unk"
            # 2740,  # What fairseq tokenizes from "<unk>": ">"
            3,  # What we tokenize from "<unk>": "<unk>"
            6,  # Residue from the tokenization: an extra sentencepiece underline
            4, 6044, 237, 6284, 50901, 528, 31, 90, 34, 927, 2,
        ]
        # fmt: on
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # All three sequences are padded to a common length of 106; the XLM-R pad
        # token id is 1, so the trailing pad runs are written as `[1] * n` and the
        # attention masks as 1/0 runs of the same lengths.
        # fmt: off
        expected_encoding = {
            "input_ids": [
                [0, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2],  # noqa: E501
                [0, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2] + [1] * 68,  # noqa: E501
                [0, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2] + [1] * 91,
            ],
            "attention_mask": [
                [1] * 106,
                [1] * 38 + [0] * 68,
                [1] * 15 + [0] * 91,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
"""simple docstring""" import math def _snake_case ( lowercase__ ): _lowerCamelCase : Any = [True] * n _lowerCamelCase : List[Any] = False _lowerCamelCase : Optional[int] = False _lowerCamelCase : Optional[int] = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): _lowerCamelCase : Union[str, Any] = i * 2 while index < n: _lowerCamelCase : List[Any] = False _lowerCamelCase : str = index + i _lowerCamelCase : Any = [2] for i in range(3 , lowercase__ , 2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def _snake_case ( lowercase__ = 999966663333 ): _lowerCamelCase : Tuple = math.floor(math.sqrt(lowercase__ ) ) + 100 _lowerCamelCase : Optional[int] = prime_sieve(lowercase__ ) _lowerCamelCase : List[Any] = 0 _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Tuple = primes[prime_index] while (last_prime**2) <= limit: _lowerCamelCase : List[str] = primes[prime_index + 1] _lowerCamelCase : Dict = last_prime**2 _lowerCamelCase : int = next_prime**2 # Get numbers divisible by lps(current) _lowerCamelCase : Any = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) _lowerCamelCase : str = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps _lowerCamelCase : int = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair _lowerCamelCase : str = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
'''simple docstring'''


def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
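# Usage sketch: verify the defining property of the modular inverse on a
# classic example (7 modulo 26, as used in affine ciphers).
if __name__ == "__main__":
    inverse = find_mod_inverse(7, 26)
    assert inverse == 15  # 7 * 15 = 105 = 4 * 26 + 1
    assert (7 * inverse) % 26 == 1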
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
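# Self-contained usage sketch that does not depend on image_data/lena.jpg:
# brighten a small uniform gray image built in memory. Pixel values shift by
# exactly `level`, because brightness(c) reduces to c + level.
if __name__ == "__main__":
    demo = Image.new("L", (4, 4), color=100)
    brighter = change_brightness(demo, 50)
    assert list(brighter.getdata()) == [150] * 16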
'''simple docstring'''
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
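# Usage sketch (shape checks only; nothing here is trained). FlaxTimesteps
# holds no learnable variables, so apply() can take an empty variable dict.
if __name__ == "__main__":
    t = jnp.arange(8, dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(t, embedding_dim=32)
    assert emb.shape == (8, 32)

    out = FlaxTimesteps(dim=32).apply({}, t)
    assert out.shape == (8, 32)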
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = "new-model" if is_tf_available(): class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = NewModelConfig @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" @slow def __lowerCAmelCase ( self : str ): UpperCAmelCase__ = 'bert-base-cased' UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = 'bert-base-cased' UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForPreTraining.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForCausalLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : Dict ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) 
self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : List[str] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForMaskedLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : str ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ,output_loading_info=lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : Dict ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForSequenceClassification.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow def __lowerCAmelCase ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForQuestionAnswering.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) @slow @require_tensorflow_probability def __lowerCAmelCase ( self : List[Any] ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: UpperCAmelCase__ = AutoConfig.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained(lowerCamelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ = TFAutoModelForTableQuestionAnswering.from_pretrained( lowerCamelCase__ ,output_loading_info=lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) ,14_410 ) def __lowerCAmelCase ( self : str ): UpperCAmelCase__ = TFAutoModelWithLMHead.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ 
,lowerCamelCase__ ) self.assertEqual(model.num_parameters() ,14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowerCamelCase__ ) ,14_410 ) def __lowerCAmelCase ( self : Optional[Any] ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel UpperCAmelCase__ = TFAutoModel.from_pretrained('sgugger/funnel-random-tiny' ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) UpperCAmelCase__ = copy.deepcopy(model.config ) UpperCAmelCase__ = ['FunnelBaseModel'] UpperCAmelCase__ = TFAutoModel.from_config(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) def __lowerCAmelCase ( self : int ): try: AutoConfig.register('new-model' ,lowerCamelCase__ ) UpperCAmelCase__ = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(lowerCamelCase__ ): auto_class.register(lowerCamelCase__ ,lowerCamelCase__ ) auto_class.register(lowerCamelCase__ ,lowerCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase__ ): auto_class.register(lowerCamelCase__ ,lowerCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ = BertModelTester(self ).get_config() UpperCAmelCase__ = NewModelConfig(**tiny_config.to_dict() ) UpperCAmelCase__ = auto_class.from_config(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCamelCase__ ) UpperCAmelCase__ = auto_class.from_pretrained(lowerCamelCase__ ) self.assertIsInstance(lowerCamelCase__ ,lowerCamelCase__ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def __lowerCAmelCase ( self : str ): with self.assertRaisesRegex( lowerCamelCase__ ,'bert-base is not a local folder and is not a valid model identifier' ): UpperCAmelCase__ = TFAutoModel.from_pretrained('bert-base' ) def __lowerCAmelCase ( self : Tuple ): with self.assertRaisesRegex( lowerCamelCase__ ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): UpperCAmelCase__ = TFAutoModel.from_pretrained(lowerCamelCase__ ,revision='aaaaaa' ) def __lowerCAmelCase ( self : List[str] ): with self.assertRaisesRegex( lowerCamelCase__ ,'hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin' ,): UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/config-no-model' ) def __lowerCAmelCase ( self : Optional[Any] ): with self.assertRaisesRegex(lowerCamelCase__ ,'Use `from_pt=True` to load this model' ): UpperCAmelCase__ = 
TFAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' ) def __lowerCAmelCase ( self : Union[str, Any] ): # Make sure we have cached the model. UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) with RequestCounter() as counter: UpperCAmelCase__ = TFAutoModel.from_pretrained('hf-internal-testing/tiny-random-bert' ) self.assertEqual(counter.get_request_count ,0 ) self.assertEqual(counter.head_request_count ,1 ) self.assertEqual(counter.other_request_count ,0 ) # With a sharded checkpoint UpperCAmelCase__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) with RequestCounter() as counter: UpperCAmelCase__ = TFAutoModel.from_pretrained('ArthurZ/tiny-random-bert-sharded' ) self.assertEqual(counter.get_request_count ,0 ) self.assertEqual(counter.head_request_count ,1 ) self.assertEqual(counter.other_request_count ,0 )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    # number & -number isolates the lowest set bit; log2 gives its index.
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
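# Usage sketch: 36 is 0b100100, so its rightmost set bit sits at index 2;
# by the convention above, an input of 0 returns 0.
if __name__ == "__main__":
    assert get_index_of_rightmost_set_bit(36) == 2
    assert get_index_of_rightmost_set_bit(1) == 0
    assert get_index_of_rightmost_set_bit(0) == 0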
'''simple docstring'''
from math import sqrt


def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
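# Hedged check taken from the Project Euler 86 statement: the least cuboid
# size M for which the cumulative solution count first exceeds 2000 is 100.
if __name__ == "__main__":
    assert solution(2000) == 100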
"""simple docstring""" def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) def _lowerCAmelCase ( UpperCamelCase_ ): if point: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): for item in point: if not isinstance(UpperCamelCase_ , (int, float) ): __SCREAMING_SNAKE_CASE = ( """Expected a list of numbers as input, found """ f"{type(UpperCamelCase_ ).__name__}" ) raise TypeError(UpperCamelCase_ ) else: __SCREAMING_SNAKE_CASE = f"Expected a list of numbers as input, found {type(UpperCamelCase_ ).__name__}" raise TypeError(UpperCamelCase_ ) else: raise ValueError("""Missing an input""" ) def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): _validate_point(UpperCamelCase_ ) _validate_point(UpperCamelCase_ ) if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(UpperCamelCase_ , UpperCamelCase_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_resnet"] = [
        "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ResNetForImageClassification",
        "ResNetModel",
        "ResNetPreTrainedModel",
        "ResNetBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_resnet"] = [
        "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFResNetForImageClassification",
        "TFResNetModel",
        "TFResNetPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_resnet"] = [
        "FlaxResNetForImageClassification",
        "FlaxResNetModel",
        "FlaxResNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
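# Note on the pattern above: _LazyModule installs itself in sys.modules so the
# heavy torch/tf/flax submodules are imported only on first attribute access,
# e.g. `from transformers.models.resnet import ResNetModel` triggers the
# import of modeling_resnet at that point, not at package import time.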