code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> str: UpperCamelCase_: int = len(UpperCAmelCase__ ) UpperCamelCase_: int = len(UpperCAmelCase__ ) UpperCamelCase_: int = ( first_str_length if first_str_length > second_str_length else second_str_length ) UpperCamelCase_: list = [] for char_count in range(UpperCAmelCase__ ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(UpperCAmelCase__ ) if __name__ == "__main__": print(alternative_string_arrange('AB', 'XYZ'), end=' ')
57
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal A_ : Union[str, Any] = datasets.utils.logging.get_logger(__name__) A_ : Optional[Any] = ['names', 'prefix'] A_ : List[str] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] A_ : List[Any] = ['encoding_errors', 'on_bad_lines'] A_ : Optional[Any] = ['date_format'] @dataclass class _lowerCAmelCase( datasets.BuilderConfig ): """simple docstring""" a : str ="," a : Optional[str] =None a : Optional[Union[int, List[int], str]] ="infer" a : Optional[List[str]] =None a : Optional[List[str]] =None a : Optional[Union[int, str, List[int], List[str]]] =None a : Optional[Union[List[int], List[str]]] =None a : Optional[str] =None a : bool =True a : Optional[Literal["c", "python", "pyarrow"]] =None a : Dict[Union[int, str], Callable[[Any], Any]] =None a : Optional[list] =None a : Optional[list] =None a : bool =False a : Optional[Union[int, List[int]]] =None a : Optional[int] =None a : Optional[Union[str, List[str]]] =None a : bool =True a : bool =True a : bool =False a : bool =True a : Optional[str] =None a : str ="." 
a : Optional[str] =None a : str ='"' a : int =0 a : Optional[str] =None a : Optional[str] =None a : Optional[str] =None a : Optional[str] =None a : bool =True a : bool =True a : int =0 a : bool =True a : bool =False a : Optional[str] =None a : int =10000 a : Optional[datasets.Features] =None a : Optional[str] ="strict" a : Literal["error", "warn", "skip"] ="error" a : Optional[str] =None def _a ( self ): if self.delimiter is not None: UpperCamelCase_: Optional[Any] = self.delimiter if self.column_names is not None: UpperCamelCase_: int = self.column_names @property def _a ( self ): UpperCamelCase_: Any = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in 
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCamelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class _lowerCAmelCase( datasets.ArrowBasedBuilder ): """simple docstring""" a : Dict =CsvConfig def _a ( self ): return datasets.DatasetInfo(features=self.config.features ) def _a ( self , _lowerCamelCase ): if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) UpperCamelCase_: Tuple = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCamelCase , (str, list, tuple) ): UpperCamelCase_: List[Any] = data_files if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: str = [files] UpperCamelCase_: Tuple = [dl_manager.iter_files(_lowerCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] UpperCamelCase_: Tuple = [] for split_name, files in data_files.items(): if isinstance(_lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Dict = [files] UpperCamelCase_: int = [dl_manager.iter_files(_lowerCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'files': files} ) ) return splits def _a ( self , _lowerCamelCase ): if self.config.features is not None: UpperCamelCase_: List[Any] = self.config.features.arrow_schema if all(not require_storage_cast(_lowerCamelCase ) for 
feature in self.config.features.values() ): # cheaper cast UpperCamelCase_: Optional[int] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCamelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCamelCase_: int = table_cast(_lowerCamelCase , _lowerCamelCase ) return pa_table def _a ( self , _lowerCamelCase ): UpperCamelCase_: List[str] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCamelCase_: Dict = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCamelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ): UpperCamelCase_: Optional[Any] = pd.read_csv(_lowerCamelCase , iterator=_lowerCamelCase , dtype=_lowerCamelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(_lowerCamelCase ): UpperCamelCase_: Union[str, Any] = pa.Table.from_pandas(_lowerCamelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase ) except ValueError as e: logger.error(f'''Failed to read file \'{file}\' with error {type(_lowerCamelCase )}: {e}''' ) raise
57
1
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _UpperCAmelCase = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def lowerCAmelCase_ ( UpperCamelCase_ ) -> List[str]: UpperCamelCase_ = {} state_dict.pop("pixel_mean" , UpperCamelCase_ ) state_dict.pop("pixel_std" , UpperCamelCase_ ) UpperCamelCase_ = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCamelCase_ = key.replace(UpperCamelCase_ , UpperCamelCase_ ) if re.match(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase_ = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(2 ) ) if layer_nb == 0: UpperCamelCase_ = key.replace("layers.0" , "proj_in" ) elif layer_nb == 1: UpperCamelCase_ = key.replace("layers.1" , "layers.0" ) elif layer_nb == 2: UpperCamelCase_ = 
key.replace("layers.2" , "proj_out" ) UpperCamelCase_ = value UpperCamelCase_ = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="ybelkada/segment-anything" ) -> Tuple: UpperCamelCase_ = hf_hub_download(UpperCamelCase_ , F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: UpperCamelCase_ = SamConfig() elif "sam_vit_l" in model_name: UpperCamelCase_ = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) UpperCamelCase_ = SamConfig( vision_config=UpperCamelCase_ , ) elif "sam_vit_h" in model_name: UpperCamelCase_ = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) UpperCamelCase_ = SamConfig( vision_config=UpperCamelCase_ , ) UpperCamelCase_ = torch.load(UpperCamelCase_ , map_location="cpu" ) UpperCamelCase_ = replace_keys(UpperCamelCase_ ) UpperCamelCase_ = SamImageProcessor() UpperCamelCase_ = SamProcessor(image_processor=UpperCamelCase_ ) UpperCamelCase_ = SamModel(UpperCamelCase_ ) hf_model.load_state_dict(UpperCamelCase_ ) UpperCamelCase_ = hf_model.to("cuda" ) UpperCamelCase_ = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" UpperCamelCase_ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("RGB" ) UpperCamelCase_ = [[[400, 650]]] UpperCamelCase_ = [[1]] UpperCamelCase_ = processor(images=np.array(UpperCamelCase_ ) , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): UpperCamelCase_ = hf_model(**UpperCamelCase_ ) UpperCamelCase_ = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_79_89_02_51_15_96_68 UpperCamelCase_ = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="pt" 
).to("cuda" ) with torch.no_grad(): UpperCamelCase_ = hf_model(**UpperCamelCase_ ) UpperCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.97_12_60_30_92_19_36_04 UpperCamelCase_ = ((75, 275, 1725, 850),) UpperCamelCase_ = processor(images=np.array(UpperCamelCase_ ) , input_boxes=UpperCamelCase_ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): UpperCamelCase_ = hf_model(**UpperCamelCase_ ) UpperCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.86_86_01_56_05_92_65_14 # Test with 2 points and 1 image. UpperCamelCase_ = [[[400, 650], [800, 650]]] UpperCamelCase_ = [[1, 1]] UpperCamelCase_ = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="pt" ).to("cuda" ) with torch.no_grad(): UpperCamelCase_ = hf_model(**UpperCamelCase_ ) UpperCamelCase_ = output.iou_scores.squeeze() assert scores[-1].item() == 0.99_36_04_77_92_43_46_92 if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() _UpperCAmelCase = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) _UpperCAmelCase = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
702
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger('transformers.models.speecht5') _UpperCAmelCase = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } _UpperCAmelCase = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } _UpperCAmelCase = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } _UpperCAmelCase = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 
'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } _UpperCAmelCase = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } _UpperCAmelCase = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } _UpperCAmelCase = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } _UpperCAmelCase = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } _UpperCAmelCase = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } _UpperCAmelCase = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _UpperCAmelCase = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } _UpperCAmelCase = [] _UpperCAmelCase = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] _UpperCAmelCase = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] 
_UpperCAmelCase = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] _UpperCAmelCase = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Any: for attribute in key.split("." ): UpperCamelCase_ = getattr(UpperCamelCase_ , UpperCamelCase_ ) if weight_type is not None: UpperCamelCase_ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape else: UpperCamelCase_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCamelCase_ = value elif weight_type == "weight_g": UpperCamelCase_ = value elif weight_type == "weight_v": UpperCamelCase_ = value elif weight_type == "bias": UpperCamelCase_ = value elif weight_type == "running_mean": UpperCamelCase_ = value elif weight_type == "running_var": UpperCamelCase_ = value elif weight_type == "num_batches_tracked": UpperCamelCase_ = value else: UpperCamelCase_ = value logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: UpperCamelCase_ , UpperCamelCase_ = key.split(".*." 
) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: UpperCamelCase_ = [] if task == "s2t": UpperCamelCase_ = hf_model.speechta.encoder.prenet.feature_encoder UpperCamelCase_ = MAPPING_S2T UpperCamelCase_ = IGNORE_KEYS_S2T elif task == "t2s": UpperCamelCase_ = None UpperCamelCase_ = MAPPING_T2S UpperCamelCase_ = IGNORE_KEYS_T2S elif task == "s2s": UpperCamelCase_ = hf_model.speechta.encoder.prenet.feature_encoder UpperCamelCase_ = MAPPING_S2S UpperCamelCase_ = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(UpperCamelCase_ , UpperCamelCase_ ): logger.info(F'''{name} was ignored''' ) continue UpperCamelCase_ = False if "conv_layers" in name: load_conv_layer( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == "group" , ) UpperCamelCase_ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: UpperCamelCase_ , UpperCamelCase_ = key.split(".*." ) if prefix in name and suffix in name: UpperCamelCase_ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: UpperCamelCase_ = True if "*" in mapped_key: UpperCamelCase_ = name.split(UpperCamelCase_ )[0].split("." 
)[-2] UpperCamelCase_ = mapped_key.replace("*" , UpperCamelCase_ ) if "weight_g" in name: UpperCamelCase_ = "weight_g" elif "weight_v" in name: UpperCamelCase_ = "weight_v" elif "bias" in name: UpperCamelCase_ = "bias" elif "weight" in name: UpperCamelCase_ = "weight" elif "running_mean" in name: UpperCamelCase_ = "running_mean" elif "running_var" in name: UpperCamelCase_ = "running_var" elif "num_batches_tracked" in name: UpperCamelCase_ = "num_batches_tracked" else: UpperCamelCase_ = None set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) continue if not is_used: unused_weights.append(UpperCamelCase_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]: UpperCamelCase_ = full_name.split("conv_layers." )[-1] UpperCamelCase_ = name.split("." ) UpperCamelCase_ = int(items[0] ) UpperCamelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCamelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCamelCase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has 
size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) UpperCamelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCamelCase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCamelCase_ ) @torch.no_grad() def lowerCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> str: if config_path is not None: UpperCamelCase_ = SpeechTaConfig.from_pretrained(UpperCamelCase_ ) else: UpperCamelCase_ = SpeechTaConfig() if task == "s2t": UpperCamelCase_ = config.max_text_positions UpperCamelCase_ = SpeechTaForSpeechToText(UpperCamelCase_ ) elif task == "t2s": UpperCamelCase_ = 1876 UpperCamelCase_ = 600 UpperCamelCase_ = config.max_speech_positions UpperCamelCase_ = SpeechTaForTextToSpeech(UpperCamelCase_ ) elif task == "s2s": UpperCamelCase_ = 1876 UpperCamelCase_ = config.max_speech_positions UpperCamelCase_ = SpeechTaForSpeechToSpeech(UpperCamelCase_ ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: UpperCamelCase_ = SpeechTaTokenizer(UpperCamelCase_ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. 
include the space before it UpperCamelCase_ = AddedToken("<mask>" , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) UpperCamelCase_ = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) UpperCamelCase_ = SpeechTaFeatureExtractor() UpperCamelCase_ = SpeechTaProcessor(tokenizer=UpperCamelCase_ , feature_extractor=UpperCamelCase_ ) processor.save_pretrained(UpperCamelCase_ ) UpperCamelCase_ = torch.load(UpperCamelCase_ ) recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase_ , UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(UpperCamelCase_ ) model.push_to_hub(UpperCamelCase_ ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) _UpperCAmelCase = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
371
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def lowerCAmelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): """simple docstring""" if (ksize % 2) == 0: __lowercase = ksize + 1 __lowercase = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(SCREAMING_SNAKE_CASE_ ): for x in range(SCREAMING_SNAKE_CASE_ ): # distance from center __lowercase = x - ksize // 2 __lowercase = y - ksize // 2 # degree to radiant __lowercase = theta / 180 * np.pi __lowercase = np.cos(_theta ) __lowercase = np.sin(_theta ) # get kernel x __lowercase = cos_theta * px + sin_theta * py # get kernel y __lowercase = -sin_theta * px + cos_theta * py # fill kernel __lowercase = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image UpperCAmelCase__ =imread("../image_data/lena.jpg") # turn image in gray scale value UpperCAmelCase__ =cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges UpperCAmelCase__ =np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: UpperCAmelCase__ =gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) UpperCAmelCase__ =out / out.max() * 255 UpperCAmelCase__ =out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
616
'''simple docstring''' import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __UpperCAmelCase : '''simple docstring''' def __init__( self : Optional[int] , _lowercase : Dict , _lowercase : Optional[int]=100 , _lowercase : List[str]=13 , _lowercase : Tuple=30 , _lowercase : Optional[int]=2 , _lowercase : Optional[int]=3 , _lowercase : Any=True , _lowercase : Dict=True , _lowercase : Any=32 , _lowercase : Union[str, Any]=4 , _lowercase : Union[str, Any]=4 , _lowercase : Tuple=37 , _lowercase : List[Any]="gelu" , _lowercase : int=0.1 , _lowercase : str=0.1 , _lowercase : Tuple=10 , _lowercase : str=0.02 , _lowercase : Dict=3 , _lowercase : Dict=None , _lowercase : Tuple=[0, 1, 2, 3] , ) -> str: A_ = parent A_ = 100 A_ = batch_size A_ = image_size A_ = patch_size A_ = num_channels A_ = is_training A_ = use_labels A_ = hidden_size A_ = num_hidden_layers A_ = num_attention_heads A_ = intermediate_size A_ = hidden_act A_ = hidden_dropout_prob A_ = attention_probs_dropout_prob A_ = type_sequence_label_size A_ = initializer_range A_ = scope A_ = out_indices A_ = 
num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A_ = (image_size // patch_size) ** 2 A_ = num_patches + 1 def __snake_case ( self : List[Any]) -> str: A_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) A_ = None A_ = None if self.use_labels: A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) A_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels) A_ = self.get_config() return config, pixel_values, labels, pixel_labels def __snake_case ( self : Union[str, Any]) -> Optional[int]: return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __snake_case ( self : List[Any] , _lowercase : Any , _lowercase : int , _lowercase : Optional[Any] , _lowercase : Tuple) -> Tuple: A_ = BeitModel(config=_lowercase) model.to(_lowercase) model.eval() A_ = model(_lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def __snake_case ( self : Optional[int] , _lowercase : str , _lowercase : Optional[int] , _lowercase : str , _lowercase : Optional[Any]) -> Union[str, Any]: A_ = BeitForMaskedImageModeling(config=_lowercase) model.to(_lowercase) model.eval() A_ = model(_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size)) def __snake_case ( self : Optional[int] , _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : str , _lowercase : 
Optional[int]) -> Any: A_ = self.type_sequence_label_size A_ = BeitForImageClassification(_lowercase) model.to(_lowercase) model.eval() A_ = model(_lowercase , labels=_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images A_ = 1 A_ = BeitForImageClassification(_lowercase) model.to(_lowercase) model.eval() A_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) A_ = model(_lowercase , labels=_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def __snake_case ( self : Tuple , _lowercase : List[str] , _lowercase : Any , _lowercase : List[str] , _lowercase : Optional[int]) -> Union[str, Any]: A_ = self.num_labels A_ = BeitForSemanticSegmentation(_lowercase) model.to(_lowercase) model.eval() A_ = model(_lowercase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)) A_ = model(_lowercase , labels=_lowercase) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)) def __snake_case ( self : Optional[int]) -> Dict: A_ = self.prepare_config_and_inputs() A_ , A_ , A_ , A_ = config_and_inputs A_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __UpperCAmelCase ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) _UpperCamelCase = ( { """feature-extraction""": BeitModel, """image-classification""": BeitForImageClassification, """image-segmentation""": BeitForSemanticSegmentation, } if is_torch_available() else {} ) _UpperCamelCase = False _UpperCamelCase = False _UpperCamelCase = False def __snake_case ( self : str) -> str: A_ = BeitModelTester(self) A_ = ConfigTester(self , 
config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37) def __snake_case ( self : Optional[Any]) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds') def __snake_case ( self : str) -> List[Any]: pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`') def __snake_case ( self : Optional[Any]) -> str: pass def __snake_case ( self : List[Any]) -> Optional[int]: A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(_lowercase) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) A_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowercase , nn.Linear)) def __snake_case ( self : Optional[Any]) -> Optional[Any]: A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ = model_class(_lowercase) A_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ = [*signature.parameters.keys()] A_ = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowercase) def __snake_case ( self : List[str]) -> str: A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase) def __snake_case ( self : List[Any]) -> Optional[int]: A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase) def __snake_case ( self : int) -> List[str]: A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowercase) def __snake_case ( self : Union[str, Any]) -> Optional[int]: A_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase) def __snake_case ( self : int) -> str: if not self.model_tester.is_training: 
return A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(_lowercase), BeitForMaskedImageModeling]: continue A_ = model_class(_lowercase) model.to(_lowercase) model.train() A_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase) A_ = model(**_lowercase).loss loss.backward() def __snake_case ( self : List[Any]) -> Optional[int]: A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return A_ = False A_ = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(_lowercase), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue A_ = model_class(_lowercase) model.gradient_checkpointing_enable() model.to(_lowercase) model.train() A_ = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase) A_ = model(**_lowercase).loss loss.backward() def __snake_case ( self : int) -> int: A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common() A_ = _config_zero_init(_lowercase) for model_class in self.all_model_classes: A_ = model_class(config=_lowercase) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __snake_case ( self : int) -> str: for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ = BeitModel.from_pretrained(_lowercase) self.assertIsNotNone(_lowercase) def lowerCamelCase( ) -> str: A_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch 
@require_vision class __UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __snake_case ( self : Optional[Any]) -> int: return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None @slow def __snake_case ( self : Any) -> Optional[Any]: A_ = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(_lowercase) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=_lowercase , return_tensors='pt').pixel_values.to(_lowercase) # prepare bool_masked_pos A_ = torch.ones((1, 196) , dtype=torch.bool).to(_lowercase) # forward pass with torch.no_grad(): A_ = model(pixel_values=_lowercase , bool_masked_pos=_lowercase) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 196, 8_192)) self.assertEqual(logits.shape , _lowercase) A_ = torch.tensor( [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]]).to(_lowercase) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _lowercase , atol=1E-2)) @slow def __snake_case ( self : List[str]) -> Any: A_ = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(_lowercase) A_ = self.default_image_processor A_ = prepare_img() A_ = image_processor(images=_lowercase , return_tensors='pt').to(_lowercase) # forward pass with torch.no_grad(): A_ = model(**_lowercase) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 1_000)) self.assertEqual(logits.shape , _lowercase) A_ = torch.tensor([-1.23_85, -1.09_87, -1.01_08]).to(_lowercase) self.assertTrue(torch.allclose(logits[0, :3] , _lowercase , atol=1E-4)) A_ = 281 self.assertEqual(logits.argmax(-1).item() , _lowercase) @slow def __snake_case ( self : Optional[int]) -> List[Any]: A_ = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to( _lowercase) A_ = self.default_image_processor A_ = prepare_img() A_ = 
image_processor(images=_lowercase , return_tensors='pt').to(_lowercase) # forward pass with torch.no_grad(): A_ = model(**_lowercase) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 21_841)) self.assertEqual(logits.shape , _lowercase) A_ = torch.tensor([1.68_81, -0.27_87, 0.59_01]).to(_lowercase) self.assertTrue(torch.allclose(logits[0, :3] , _lowercase , atol=1E-4)) A_ = 2_396 self.assertEqual(logits.argmax(-1).item() , _lowercase) @slow def __snake_case ( self : Tuple) -> Any: A_ = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640') A_ = model.to(_lowercase) A_ = BeitImageProcessor(do_resize=_lowercase , size=640 , do_center_crop=_lowercase) A_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test') A_ = Image.open(ds[0]['file']) A_ = image_processor(images=_lowercase , return_tensors='pt').to(_lowercase) # forward pass with torch.no_grad(): A_ = model(**_lowercase) A_ = outputs.logits # verify the logits A_ = torch.Size((1, 150, 160, 160)) self.assertEqual(logits.shape , _lowercase) A_ = version.parse(PIL.__version__) < version.parse('9.0.0') if is_pillow_less_than_a: A_ = torch.tensor( [ [[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]], [[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]], [[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]], ] , device=_lowercase , ) else: A_ = torch.tensor( [ [[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]], [[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]], [[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]], ] , device=_lowercase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-4)) @slow def __snake_case ( self : List[str]) -> Tuple: A_ = 
BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640') A_ = model.to(_lowercase) A_ = BeitImageProcessor(do_resize=_lowercase , size=640 , do_center_crop=_lowercase) A_ = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test') A_ = Image.open(ds[0]['file']) A_ = image_processor(images=_lowercase , return_tensors='pt').to(_lowercase) # forward pass with torch.no_grad(): A_ = model(**_lowercase) A_ = outputs.logits.detach().cpu() A_ = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(500, 300)]) A_ = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape , _lowercase) A_ = image_processor.post_process_semantic_segmentation(outputs=_lowercase) A_ = torch.Size((160, 160)) self.assertEqual(segmentation[0].shape , _lowercase)
366
0
'''simple docstring'''
import os
from datetime import datetime as dt

from github import Github


# Issues carrying any of these labels are exempt from the stale-bot workflow.
# NOTE: the for-loop below reads this name, so the constant must be called
# LABELS_TO_EXEMPT (the previous binding used a different name and the loop
# raised NameError).
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    """Run one pass of the stale-issue bot over huggingface/diffusers.

    For every open issue:
      * close it when the last comment is the bot's stale notice and it has
        been inactive for more than 7 days,
      * re-open / un-label it when a human replied after the stale notice,
      * post the stale notice after 23 days of inactivity.

    Requires a ``GITHUB_TOKEN`` environment variable.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. The previous lambda referenced an undefined
        # name (`i`) and passed a non-bool to `reverse`, raising at runtime.
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # get_labels() yields Label objects, so compare on .name instead of
            # testing the raw string "stale" for membership (always False).
            any(label.name.lower() == "stale" for label in issue.get_labels())
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.'
            )
            issue.add_to_labels('stale')


if __name__ == "__main__":
    main()
715
'''simple docstring'''
from ...configuration_utils import PretrainedConfig


class UpperCAmelCase(PretrainedConfig):
    """Configuration for a BertGeneration model.

    Stores the hyperparameters of the model (vocabulary size, layer widths and
    counts, dropout rates, token ids, ...). Defaults reproduce the published
    ``bert-generation`` checkpoint configuration.

    Note: the previous definition gave every ``__init__`` parameter the same
    name (a SyntaxError) and inherited from an undefined base; the parameter
    names are restored from the attribute assignments in the body, and the
    base class from the import above.
    """

    # Identifier used by the Auto* machinery to map configs to this model type.
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ) -> None:
        # Special-token ids are consumed by the base class; everything else is
        # stored as plain attributes for the model to read.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
384
0
def lowerCAmelCase_(a: str, b: str) -> bool:
    """Return True if string ``a`` can be abbreviated to string ``b``.

    ``b`` is assumed to contain only uppercase letters. ``a`` matches ``b``
    when some of its lowercase letters can be uppercased and every remaining
    lowercase letter deleted so that the result equals ``b``.

    The previous definition named both parameters identically, which is a
    SyntaxError; the body already referred to them as ``a`` and ``b``.

    >>> lowerCAmelCase_("daBcd", "ABC")
    True
    >>> lowerCAmelCase_("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of ``a`` can be turned
    # into the first j characters of ``b``.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Option 1: uppercase a[i] to consume b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Option 2: delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
81
import numpy as np

import datasets


# Constant names restored: the @add_start_docstrings decorator and the
# MetricInfo construction below read _DESCRIPTION / _CITATION /
# _KWARGS_DESCRIPTION, which were previously all bound to one other name.
_DESCRIPTION = """
Compute the Mahalanobis Distance

Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""

_CITATION = """\
@article{de2000mahalanobis,
  title={The mahalanobis distance},
  author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
  journal={Chemometrics and intelligent laboratory systems},
  volume={50},
  number={1},
  pages={1--18},
  year={2000},
  publisher={Elsevier}
}
"""

_KWARGS_DESCRIPTION = """
Args:
    X: List of datapoints to be compared with the `reference_distribution`.
    reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:

    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")
    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
    >>> print(results)
    {'mahalanobis': array([0.5])}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowerCamelCase(datasets.Metric):
    """Metric computing the Mahalanobis distance of points in ``X`` to a reference distribution."""

    # datasets.Metric dispatches to the _info / _compute overrides; the two
    # methods previously shared one name, so the first was shadowed.
    def _info(self):
        """Describe the metric (docs, citation, expected input features)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return ``{"mahalanobis": distances}`` for each row of ``X``.

        Raises ValueError unless both inputs are 2D and the reference
        distribution has at least two rows (a covariance needs >1 sample).
        """
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
243
0
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class a ( _SCREAMING_SNAKE_CASE ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = (DDPMScheduler,) def lowerCamelCase__ ( self : Optional[Any] , **snake_case : Any ) -> Dict: __UpperCAmelCase : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**A_ ) return config def lowerCamelCase__ ( self : Any ) -> List[Any]: for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def lowerCamelCase__ ( self : Optional[Any] ) -> str: for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=A_ , beta_end=A_ ) def lowerCamelCase__ ( self : Optional[Any] ) -> Any: for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=A_ ) def lowerCamelCase__ ( self : Any ) -> Tuple: for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=A_ ) def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]: for clip_sample in [True, False]: self.check_over_configs(clip_sample=A_ ) def lowerCamelCase__ ( self : Dict ) -> Optional[Any]: self.check_over_configs(thresholding=A_ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=A_ , prediction_type=A_ , sample_max_value=A_ , ) def lowerCamelCase__ ( self : Any ) -> Optional[int]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]: for t in [0, 500, 999]: self.check_over_forward(time_step=A_ ) def lowerCamelCase__ ( self : List[Any] ) -> List[str]: __UpperCAmelCase : List[str] = self.scheduler_classes[0] 
__UpperCAmelCase : Optional[int] = self.get_scheduler_config() __UpperCAmelCase : Optional[int] = scheduler_class(**A_ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]: __UpperCAmelCase : Optional[Any] = self.scheduler_classes[0] __UpperCAmelCase : Union[str, Any] = self.get_scheduler_config() __UpperCAmelCase : Optional[Any] = scheduler_class(**A_ ) __UpperCAmelCase : Dict = len(A_ ) __UpperCAmelCase : Union[str, Any] = self.dummy_model() __UpperCAmelCase : Optional[Any] = self.dummy_sample_deter __UpperCAmelCase : Dict = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual __UpperCAmelCase : str = model(A_ , A_ ) # 2. predict previous mean of sample x_t-1 __UpperCAmelCase : int = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __UpperCAmelCase : List[Any] = pred_prev_sample __UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) ) __UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def lowerCamelCase__ ( self : int ) -> Tuple: __UpperCAmelCase : List[str] = self.scheduler_classes[0] __UpperCAmelCase : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' ) __UpperCAmelCase : Dict = scheduler_class(**A_ ) __UpperCAmelCase : Optional[int] = len(A_ ) __UpperCAmelCase : List[str] = self.dummy_model() __UpperCAmelCase : Any = self.dummy_sample_deter __UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(A_ ) ): # 1. predict noise residual __UpperCAmelCase : Union[str, Any] = model(A_ , A_ ) # 2. 
predict previous mean of sample x_t-1 __UpperCAmelCase : Dict = scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __UpperCAmelCase : List[Any] = pred_prev_sample __UpperCAmelCase : Optional[int] = torch.sum(torch.abs(A_ ) ) __UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase : Dict = self.scheduler_classes[0] __UpperCAmelCase : int = self.get_scheduler_config() __UpperCAmelCase : Optional[Any] = scheduler_class(**A_ ) __UpperCAmelCase : Dict = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=A_ ) __UpperCAmelCase : str = scheduler.timesteps for i, timestep in enumerate(A_ ): if i == len(A_ ) - 1: __UpperCAmelCase : int = -1 else: __UpperCAmelCase : Dict = timesteps[i + 1] __UpperCAmelCase : Optional[int] = scheduler.previous_timestep(A_ ) __UpperCAmelCase : Any = prev_t.item() self.assertEqual(A_ , A_ ) def lowerCamelCase__ ( self : int ) -> Tuple: __UpperCAmelCase : Any = self.scheduler_classes[0] __UpperCAmelCase : Tuple = self.get_scheduler_config() __UpperCAmelCase : Dict = scheduler_class(**A_ ) __UpperCAmelCase : Optional[int] = [100, 87, 50, 51, 0] with self.assertRaises(A_ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=A_ ) def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0] __UpperCAmelCase : Optional[int] = self.get_scheduler_config() __UpperCAmelCase : str = scheduler_class(**A_ ) __UpperCAmelCase : Tuple = [100, 87, 50, 1, 0] __UpperCAmelCase : Tuple = len(A_ ) with self.assertRaises(A_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): 
scheduler.set_timesteps(num_inference_steps=A_ , timesteps=A_ ) def lowerCamelCase__ ( self : Union[str, Any] ) -> int: __UpperCAmelCase : Dict = self.scheduler_classes[0] __UpperCAmelCase : List[Any] = self.get_scheduler_config() __UpperCAmelCase : Dict = scheduler_class(**A_ ) __UpperCAmelCase : List[Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( A_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=A_ )
703
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer __UpperCAmelCase :Optional[int] = ["bert-base-uncased", "bert-base-cased"] __UpperCAmelCase :str = "hf-internal-testing/tiny-bert-tf-only" if is_tf_available(): class a ( tf.keras.Model ): """simple docstring""" def __init__( self : List[str] , snake_case : List[str] ) -> str: super().__init__() __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(snake_case ) __UpperCAmelCase : int = TFAutoModel.from_config(snake_case ) def lowerCamelCase__ ( self : List[Any] , snake_case : Optional[int] ) -> Optional[Any]: __UpperCAmelCase : List[Any] = self.tokenizer(snake_case ) __UpperCAmelCase : Optional[Any] = self.bert(**snake_case ) return out["pooler_output"] @require_tf @require_tensorflow_text class a ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] ) -> List[str]: super().setUp() __UpperCAmelCase : Tuple = [ BertTokenizer.from_pretrained(snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false __UpperCAmelCase : Any = [TFBertTokenizer.from_pretrained(snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(snake_case , use_fast_bert_tokenizer=snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) __UpperCAmelCase : Optional[int] = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', 
'''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] __UpperCAmelCase : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]: for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): __UpperCAmelCase : Any = tokenizer(snake_case , return_tensors='''tf''' , padding='''longest''' ) __UpperCAmelCase : Optional[int] = tf_tokenizer(snake_case ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def lowerCamelCase__ ( self : List[Any] ) -> str: for tf_tokenizer in self.tf_tokenizers: __UpperCAmelCase : Any = tf_tokenizer(self.paired_sentences ) __UpperCAmelCase : Union[str, Any] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def lowerCamelCase__ ( self : str ) -> Union[str, Any]: for tf_tokenizer in self.tf_tokenizers: __UpperCAmelCase : Optional[int] = tf.function(snake_case ) for test_inputs in (self.test_sentences, self.paired_sentences): __UpperCAmelCase : int = tf.constant(snake_case ) __UpperCAmelCase : Tuple = compiled_tokenizer(snake_case ) __UpperCAmelCase : Optional[int] = tf_tokenizer(snake_case ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def lowerCamelCase__ ( self : str ) -> str: for tf_tokenizer in self.tf_tokenizers: 
__UpperCAmelCase : List[Any] = ModelToSave(tokenizer=snake_case ) __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor(self.test_sentences ) __UpperCAmelCase : Tuple = model(snake_case ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: __UpperCAmelCase : Any = Path(snake_case ) / '''saved.model''' model.save(snake_case ) __UpperCAmelCase : str = tf.keras.models.load_model(snake_case ) __UpperCAmelCase : Optional[int] = loaded_model(snake_case ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
266
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class __magic_name__(PipelineTool):
    """Tool answering a natural-language question about an image (VQA).

    The class-attribute names below are the PipelineTool contract
    (checkpoint, processor/model classes, tool name, I/O modalities); they
    were previously all bound to a single identifier, so only the last
    assignment survived.
    """

    default_checkpoint = '''dandelin/vilt-b32-finetuned-vqa'''
    description = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    name = '''image_qa'''
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ['''image''', '''text''']
    outputs = ['''text''']

    def __init__(self, *args, **kwargs):
        # Image handling needs the vision extras (PIL); fail early otherwise.
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Turn the raw image + question into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors='''pt''')

    def forward(self, inputs):
        """Run the VQA model; inference only, so no gradients are needed."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring class index to its answer string.

        HF configs expose the index-to-label mapping as ``id2label``; the
        previous attribute name (``idalabel``) does not exist and raised
        AttributeError.
        """
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
271
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys UpperCAmelCase_ = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
271
1
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase = logging.get_logger(__name__) class UpperCAmelCase_ : def __init__( self : int , __UpperCamelCase : str = None , __UpperCamelCase : uuid.UUID = None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Union[str, Any]=None ) -> Union[str, Any]: if not conversation_id: _UpperCamelCase = uuid.uuida() if past_user_inputs is None: _UpperCamelCase = [] if generated_responses is None: _UpperCamelCase = [] _UpperCamelCase = conversation_id _UpperCamelCase = past_user_inputs _UpperCamelCase = generated_responses _UpperCamelCase = text def __eq__( self : Optional[Any] , __UpperCamelCase : Any ) -> int: if not isinstance(__UpperCamelCase , __UpperCamelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _UpperCamelCase ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : bool = False ) -> List[str]: if self.new_user_input: if overwrite: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' F'''with: "{text}".''' ) _UpperCamelCase = text else: logger.warning( F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' F'''ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input''' ) else: _UpperCamelCase = text def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) _UpperCamelCase = None def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : str ) -> List[str]: self.generated_responses.append(__UpperCamelCase ) def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Optional[int] ) -> List[str]: _UpperCamelCase = F'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): _UpperCamelCase = '''user''' if is_user else '''bot''' output += F'''{name} >> {text} \n''' return output @add_end_docstrings( _lowercase , r''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class UpperCAmelCase_ ( _lowercase): def __init__( self : int , *__UpperCamelCase : List[str] , **__UpperCamelCase : Any ) -> Union[str, Any]: super().__init__(*__UpperCamelCase , **__UpperCamelCase ) if self.tokenizer.pad_token_id is None: _UpperCamelCase = self.tokenizer.eos_token def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=None , **__UpperCamelCase : List[Any] ) -> Union[str, Any]: _UpperCamelCase = {} _UpperCamelCase = {} _UpperCamelCase = {} if min_length_for_response is not None: _UpperCamelCase = min_length_for_response if minimum_tokens is not None: _UpperCamelCase = minimum_tokens if "max_length" in generate_kwargs: _UpperCamelCase = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__UpperCamelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] , __UpperCamelCase : Union[Conversation, List[Conversation]] , __UpperCamelCase : Any=0 , **__UpperCamelCase : Dict ) -> Optional[Any]: _UpperCamelCase = super().__call__(__UpperCamelCase , num_workers=__UpperCamelCase , **__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) == 1: return outputs[0] return outputs def _UpperCamelCase ( self : str , __UpperCamelCase : Conversation , __UpperCamelCase : List[Any]=32 ) -> Dict[str, Any]: if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. 
''' '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): _UpperCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCamelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version _UpperCamelCase = self._legacy_parse_and_tokenize(__UpperCamelCase ) if self.framework == "pt": _UpperCamelCase = torch.LongTensor([input_ids] ) elif self.framework == "tf": _UpperCamelCase = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : List[Any]=10 , **__UpperCamelCase : Optional[int] ) -> str: _UpperCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) _UpperCamelCase = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) _UpperCamelCase = max_length - minimum_tokens _UpperCamelCase = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: _UpperCamelCase = model_inputs['''attention_mask'''][:, -trim:] _UpperCamelCase = model_inputs.pop('''conversation''' ) _UpperCamelCase = max_length _UpperCamelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase ) if self.model.config.is_encoder_decoder: _UpperCamelCase = 1 else: _UpperCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _UpperCamelCase ( self : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=True ) -> str: _UpperCamelCase = model_outputs['''output_ids'''] _UpperCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , ) _UpperCamelCase = model_outputs['''conversation'''] conversation.mark_processed() 
conversation.append_response(__UpperCamelCase ) return conversation def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Conversation ) -> Dict: _UpperCamelCase = self.tokenizer.eos_token_id _UpperCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) ) if len(__UpperCamelCase ) > self.tokenizer.model_max_length: _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
342
"""simple docstring""" def lowercase ( a__ : str , a__ : str ) -> float: def get_matched_characters(a__ : str , a__ : str ) -> str: _UpperCamelCase = [] _UpperCamelCase = min(len(_stra ) , len(_stra ) ) // 2 for i, l in enumerate(_stra ): _UpperCamelCase = int(max(0 , i - limit ) ) _UpperCamelCase = int(min(i + limit + 1 , len(_stra ) ) ) if l in _stra[left:right]: matched.append(a__ ) _UpperCamelCase = F'''{_stra[0:_stra.index(a__ )]} {_stra[_stra.index(a__ ) + 1:]}''' return "".join(a__ ) # matching characters _UpperCamelCase = get_matched_characters(a__ , a__ ) _UpperCamelCase = get_matched_characters(a__ , a__ ) _UpperCamelCase = len(a__ ) # transposition _UpperCamelCase = ( len([(ca, ca) for ca, ca in zip(a__ , a__ ) if ca != ca] ) // 2 ) if not match_count: _UpperCamelCase = 0.0 else: _UpperCamelCase = ( 1 / 3 * ( match_count / len(a__ ) + match_count / len(a__ ) + (match_count - transpositions) / match_count ) ) # common prefix up to 4 characters _UpperCamelCase = 0 for ca, ca in zip(stra[:4] , stra[:4] ): if ca == ca: prefix_len += 1 else: break return jaro + 0.1 * prefix_len * (1 - jaro) if __name__ == "__main__": import doctest doctest.testmod() print(jaro_winkler("""hello""", """world"""))
342
1
"""simple docstring""" a__ : str = tuple[float, float, float] a__ : Dict = tuple[float, float, float] def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = end_pointa[0] - end_pointa[0] __SCREAMING_SNAKE_CASE = end_pointa[1] - end_pointa[1] __SCREAMING_SNAKE_CASE = end_pointa[2] - end_pointa[2] return (x, y, z) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ab[1] * ac[2] - ab[2] * ac[1] # *i __SCREAMING_SNAKE_CASE = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j __SCREAMING_SNAKE_CASE = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): '''simple docstring''' __SCREAMING_SNAKE_CASE = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
682
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[Any] = AutoencoderKL snake_case__ : Optional[Any] = "sample" snake_case__ : Optional[Any] = 1E-2 @property def UpperCAmelCase_ ( self : Tuple ) -> int: __SCREAMING_SNAKE_CASE = 4 __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = (3_2, 3_2) __SCREAMING_SNAKE_CASE = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase__ ) return {"sample": image} @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: return (3, 3_2, 3_2) @property def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: return (3, 3_2, 3_2) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: __SCREAMING_SNAKE_CASE = { "block_out_channels": [3_2, 6_4], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } __SCREAMING_SNAKE_CASE = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def UpperCAmelCase_ ( self : str ) -> List[Any]: # enable deterministic behavior for gradient checkpointing __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) 
model.to(UpperCAmelCase__ ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE = torch.randn_like(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE = self.model_class(**UpperCAmelCase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(UpperCAmelCase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE = model_a(**UpperCAmelCase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) __SCREAMING_SNAKE_CASE = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCAmelCase_ ( self : List[str] ) -> Any: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) 
__SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase__ ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE = image.to(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ , generator=UpperCAmelCase__ ).sample __SCREAMING_SNAKE_CASE = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __SCREAMING_SNAKE_CASE = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: __SCREAMING_SNAKE_CASE = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1E-2 ) ) @slow class UpperCamelCase_ ( unittest.TestCase): """simple docstring""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict ) -> Any: return F"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase__ ) for s in shape] )}.npy""" def UpperCAmelCase_ ( self : Optional[int] ) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : Optional[Any]=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase__ : Any=False ) -> List[str]: __SCREAMING_SNAKE_CASE = torch.floataa if 
fpaa else torch.floataa __SCREAMING_SNAKE_CASE = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase__ , UpperCAmelCase__ ) ) ).to(UpperCAmelCase__ ).to(UpperCAmelCase__ ) return image def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict="CompVis/stable-diffusion-v1-4" , UpperCAmelCase__ : Optional[Any]=False ) -> Tuple: __SCREAMING_SNAKE_CASE = "fp16" if fpaa else None __SCREAMING_SNAKE_CASE = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE = AutoencoderKL.from_pretrained( UpperCAmelCase__ , subfolder="vae" , torch_dtype=UpperCAmelCase__ , revision=UpperCAmelCase__ , ) model.to(UpperCAmelCase__ ).eval() return model def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int=0 ) -> str: if torch_device == "mps": return torch.manual_seed(UpperCAmelCase__ ) return torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , 
UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , generator=UpperCAmelCase__ , sample_posterior=UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=3E-3 ) @parameterized.expand( [ # 
fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int ) -> str: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] __SCREAMING_SNAKE_CASE = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model(fpaa=UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Tuple ) -> Dict: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.decode(UpperCAmelCase__ ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE = self.get_sd_image(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_generator(UpperCAmelCase__ ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model.encode(UpperCAmelCase__ ).latent_dist __SCREAMING_SNAKE_CASE = 
dist.sample(generator=UpperCAmelCase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(UpperCAmelCase__ , UpperCAmelCase__ , atol=UpperCAmelCase__ )
682
1
from collections import deque class __UpperCamelCase : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None: a__ = process_name # process name a__ = arrival_time # arrival time of the process # completion time of finished process or last interrupted time a__ = arrival_time a__ = burst_time # remaining burst time a__ = 0 # total time of the process wait in ready queue a__ = 0 # time from arrival time to completion time class __UpperCamelCase : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> None: # total number of mlfq's queues a__ = number_of_queues # time slice of queues that round robin algorithm applied a__ = time_slices # unfinished process is in this ready_queue a__ = queue # current time a__ = current_time # finished process is in this sequence queue a__ = deque() def _UpperCAmelCase ( self ) -> list[str]: a__ = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]: a__ = [] for i in range(len(SCREAMING_SNAKE_CASE ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]: a__ = [] for i in range(len(SCREAMING_SNAKE_CASE ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]: a__ = [] for i in range(len(SCREAMING_SNAKE_CASE ) ): completion_times.append(queue[i].stop_time ) return completion_times def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[int]: return [q.burst_time for q in queue] def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: process.waiting_time += self.current_time - process.stop_time return process.waiting_time def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> deque[Process]: 
a__ = deque() # sequence deque of finished process while len(SCREAMING_SNAKE_CASE ) != 0: a__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(SCREAMING_SNAKE_CASE ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 a__ = 0 # set the process's turnaround time because it is finished a__ = self.current_time - cp.arrival_time # set the completion time a__ = self.current_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE ) self.finish_queue.extend(SCREAMING_SNAKE_CASE ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> tuple[deque[Process], deque[Process]]: a__ = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(SCREAMING_SNAKE_CASE ) ): a__ = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(SCREAMING_SNAKE_CASE ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time a__ = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(SCREAMING_SNAKE_CASE ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished a__ = 0 # set the finish time a__ = 
self.current_time # update the process' turnaround time because it is finished a__ = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(SCREAMING_SNAKE_CASE ) self.finish_queue.extend(SCREAMING_SNAKE_CASE ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def _UpperCAmelCase ( self ) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1 ): a__ , a__ = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest a_ : Tuple = Process('P1', 0, 53) a_ : str = Process('P2', 0, 17) a_ : Optional[int] = Process('P3', 0, 68) a_ : Optional[Any] = Process('P4', 0, 24) a_ : Tuple = 3 a_ : Tuple = [17, 25] a_ : List[str] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) a_ : Any = Process('P1', 0, 53) a_ : Union[str, Any] = Process('P2', 0, 17) a_ : Optional[int] = Process('P3', 0, 68) a_ : List[Any] = Process('P4', 0, 24) a_ : str = 3 a_ : Optional[Any] = [17, 25] a_ : List[str] = deque([Pa, Pa, Pa, Pa]) a_ : int = MLFQ(number_of_queues, time_slices, queue, 0) a_ : Optional[int] = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( f'waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print completion times of processes(P1, P2, P3, P4) print( f'completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print total turnaround times of processes(P1, P2, P3, P4) print( f'turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}' ) # print sequence of finished processes print( f'sequence of finished 
processes:\ {mlfq.calculate_sequence_of_finish_queue()}' )
716
def __a ( __UpperCAmelCase , __UpperCAmelCase ): # Check if the input is valid if not len(__UpperCAmelCase ) == len(__UpperCAmelCase ) == 3: raise ValueError('''Please enter a valid equation.''' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('''Both a & b of two equations can\'t be zero.''' ) # Extract the coefficients a__ , a__ , a__ = equationa a__ , a__ , a__ = equationa # Calculate the determinants of the matrices a__ = aa * ba - aa * ba a__ = ca * ba - ca * ba a__ = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('''Infinite solutions. (Consistent system)''' ) else: raise ValueError('''No solution. (Inconsistent system)''' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: a__ = determinant_x / determinant a__ = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
148
0
import unittest from transformers import DonutProcessor lowercase = "naver-clova-ix/donut-base" class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ) -> Dict: snake_case_ = DonutProcessor.from_pretrained(a ) def _UpperCamelCase ( self ) -> Dict: snake_case_ = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } snake_case_ = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) snake_case_ = self.processor.tokenajson(a ) self.assertDictEqual(a , a )
198
import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowercase = 4 lowercase = 3 class UpperCamelCase_ ( snake_case_ ): '''simple docstring''' pass def __UpperCAmelCase ( a_): for shard in shards: for i in range(a_): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ): snake_case_ = int(os.environ['RANK']) snake_case_ = int(os.environ['WORLD_SIZE']) snake_case_ = ArgumentParser() parser.add_argument('--streaming' , type=a_) parser.add_argument('--local_rank' , type=a_) parser.add_argument('--num_workers' , type=a_ , default=0) snake_case_ = parser.parse_args() snake_case_ = args.streaming snake_case_ = args.num_workers snake_case_ = {'shards': [f'''shard_{shard_idx}''' for shard_idx in range(a_)]} snake_case_ = IterableDataset.from_generator(a_ , gen_kwargs=a_) if not streaming: snake_case_ = Dataset.from_list(list(a_)) snake_case_ = split_dataset_by_node(a_ , rank=a_ , world_size=a_) snake_case_ = torch.utils.data.DataLoader(a_ , num_workers=a_) snake_case_ = NUM_SHARDS * NUM_ITEMS_PER_SHARD snake_case_ = full_size // world_size expected_local_size += int(rank < (full_size % world_size)) snake_case_ = sum(1 for _ in dataloader) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''') if __name__ == "__main__": main()
198
1
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _snake_case : def __init__( self , a = "cpu" , a = "openai/clip-vit-large-patch14") -> None: SCREAMING_SNAKE_CASE = device SCREAMING_SNAKE_CASE = CLIPTokenizerFast.from_pretrained(a) SCREAMING_SNAKE_CASE = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] SCREAMING_SNAKE_CASE = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] SCREAMING_SNAKE_CASE = torchvision.transforms.Normalize(self.image_mean , self.image_std) SCREAMING_SNAKE_CASE = torchvision.transforms.Resize(224) SCREAMING_SNAKE_CASE = torchvision.transforms.CenterCrop(224) def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]: SCREAMING_SNAKE_CASE = self.resize(a) SCREAMING_SNAKE_CASE = self.center_crop(a) SCREAMING_SNAKE_CASE = self.normalize(a) return images def __call__( self , a=None , a=None , **a) -> Optional[Any]: SCREAMING_SNAKE_CASE = self.tokenizer(text=a , **a) SCREAMING_SNAKE_CASE = self.preprocess_img(a) SCREAMING_SNAKE_CASE = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class _snake_case ( nn.Module ): def __init__( self , a=10 , a=0.01 , a=None , a=None , a=None , a=None , a=None , a=None , a=False , a=True , a="image" , a=True , a=False , a=False , a=False , ) -> None: super().__init__() SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = device if device else get_device() if vqgan: SCREAMING_SNAKE_CASE = vqgan else: SCREAMING_SNAKE_CASE = load_vqgan(self.device , conf_path=a , ckpt_path=a) self.vqgan.eval() if clip: SCREAMING_SNAKE_CASE = clip else: SCREAMING_SNAKE_CASE = CLIPModel.from_pretrained('openai/clip-vit-base-patch32') self.clip.to(self.device) SCREAMING_SNAKE_CASE = 
ProcessorGradientFlow(device=self.device) SCREAMING_SNAKE_CASE = iterations SCREAMING_SNAKE_CASE = lr SCREAMING_SNAKE_CASE = log SCREAMING_SNAKE_CASE = make_grid SCREAMING_SNAKE_CASE = return_val SCREAMING_SNAKE_CASE = quantize SCREAMING_SNAKE_CASE = self.vqgan.decoder.z_shape def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None , a=5 , a=True) -> List[Any]: SCREAMING_SNAKE_CASE = [] if output_path is None: SCREAMING_SNAKE_CASE = './animation.gif' if input_path is None: SCREAMING_SNAKE_CASE = self.save_path SCREAMING_SNAKE_CASE = sorted(glob(input_path + '/*')) if not len(a): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)') if len(a) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)') SCREAMING_SNAKE_CASE = total_duration / len(a) SCREAMING_SNAKE_CASE = [frame_duration] * len(a) if extend_frames: SCREAMING_SNAKE_CASE = 1.5 SCREAMING_SNAKE_CASE = 3 for file_name in paths: if file_name.endswith('.png'): images.append(imageio.imread(a)) imageio.mimsave(a , a , duration=a) print(f'''gif saved to {output_path}''') def SCREAMING_SNAKE_CASE__ ( self , a=None , a=None) -> List[Any]: if not (path or img): raise ValueError('Input either path or tensor') if img is not None: raise NotImplementedError SCREAMING_SNAKE_CASE = preprocess(Image.open(a) , target_image_size=256).to(self.device) SCREAMING_SNAKE_CASE = preprocess_vqgan(a) SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.vqgan.encode(a) return z def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = self.latent.detach().requires_grad_() SCREAMING_SNAKE_CASE = base_latent + transform_vector if self.quantize: SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE = self.vqgan.quantize(a) else: SCREAMING_SNAKE_CASE = trans_latent return self.vqgan.decode(a) def SCREAMING_SNAKE_CASE__ ( self , a , a , a=None) -> Any: SCREAMING_SNAKE_CASE = 
self.clip_preprocessor(text=a , images=a , return_tensors='pt' , padding=a) SCREAMING_SNAKE_CASE = self.clip(**a) SCREAMING_SNAKE_CASE = clip_outputs.logits_per_image if weights is not None: SCREAMING_SNAKE_CASE = similarity_logits * weights return similarity_logits.sum() def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Any: SCREAMING_SNAKE_CASE = self._get_clip_similarity(pos_prompts['prompts'] , a , weights=(1 / pos_prompts['weights'])) if neg_prompts: SCREAMING_SNAKE_CASE = self._get_clip_similarity(neg_prompts['prompts'] , a , weights=neg_prompts['weights']) else: SCREAMING_SNAKE_CASE = torch.tensor([1] , device=self.device) SCREAMING_SNAKE_CASE = -torch.log(a) + torch.log(a) return loss def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> str: SCREAMING_SNAKE_CASE = torch.randn_like(self.latent , requires_grad=a , device=self.device) SCREAMING_SNAKE_CASE = torch.optim.Adam([vector] , lr=self.lr) for i in range(self.iterations): optim.zero_grad() SCREAMING_SNAKE_CASE = self._add_vector(a) SCREAMING_SNAKE_CASE = loop_post_process(a) SCREAMING_SNAKE_CASE = self._get_CLIP_loss(a , a , a) print('CLIP loss' , a) if self.log: wandb.log({'CLIP Loss': clip_loss}) clip_loss.backward(retain_graph=a) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> int: wandb.init(reinit=a , project='face-editor') wandb.config.update({'Positive Prompts': positive_prompts}) wandb.config.update({'Negative Prompts': negative_prompts}) wandb.config.update({'lr': self.lr, 'iterations': self.iterations}) if image_path: SCREAMING_SNAKE_CASE = Image.open(a) SCREAMING_SNAKE_CASE = image.resize((256, 256)) wandb.log('Original Image' , wandb.Image(a)) def SCREAMING_SNAKE_CASE__ ( self , a) -> int: if not prompts: return [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] if isinstance(a , a): SCREAMING_SNAKE_CASE = [prompt.strip() for prompt in prompts.split('|')] for prompt in prompts: if 
isinstance(a , (tuple, list)): SCREAMING_SNAKE_CASE = prompt[0] SCREAMING_SNAKE_CASE = float(prompt[1]) elif ":" in prompt: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = prompt.split(':') SCREAMING_SNAKE_CASE = float(a) else: SCREAMING_SNAKE_CASE = prompt SCREAMING_SNAKE_CASE = 1.0 processed_prompts.append(a) weights.append(a) return { "prompts": processed_prompts, "weights": torch.tensor(a , device=self.device), } def SCREAMING_SNAKE_CASE__ ( self , a , a=None , a=None , a=True , a=False , a=True , a=True , a=None , ) -> str: if image_path: SCREAMING_SNAKE_CASE = self._get_latent(a) else: SCREAMING_SNAKE_CASE = torch.randn(self.latent_dim , device=self.device) if self.log: self._init_logging(a , a , a) assert pos_prompts, "You must provide at least one positive prompt." SCREAMING_SNAKE_CASE = self.process_prompts(a) SCREAMING_SNAKE_CASE = self.process_prompts(a) if save_final and save_path is None: SCREAMING_SNAKE_CASE = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'])) if not os.path.exists(a): os.makedirs(a) else: SCREAMING_SNAKE_CASE = save_path + '_' + get_timestamp() os.makedirs(a) SCREAMING_SNAKE_CASE = save_path SCREAMING_SNAKE_CASE = self.vqgan.decode(self.latent)[0] if show_intermediate: print('Original Image') show_pil(custom_to_pil(a)) SCREAMING_SNAKE_CASE = loop_post_process(a) for iter, transformed_img in enumerate(self._optimize_CLIP(a , a , a)): if show_intermediate: show_pil(a) if save_intermediate: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''')) if self.log: wandb.log({'Image': wandb.Image(a)}) if show_final: show_pil(a) if save_final: transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png'''))
711
a_ : Tuple = { 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on a_ : List[Any] = {value: key for key, value in MORSE_CODE_DICT.items()} def lowerCamelCase__ (_UpperCAmelCase): return " ".join(MORSE_CODE_DICT[char] for char in message.upper()) def lowerCamelCase__ (_UpperCAmelCase): return "".join(REVERSE_DICT[char] for char in message.split()) def lowerCamelCase__ (): SCREAMING_SNAKE_CASE = 'Morse code here!' print(_UpperCAmelCase) SCREAMING_SNAKE_CASE = encrypt(_UpperCAmelCase) print(_UpperCAmelCase) SCREAMING_SNAKE_CASE = decrypt(_UpperCAmelCase) print(_UpperCAmelCase) if __name__ == "__main__": main()
444
0
from __future__ import annotations def lowerCAmelCase_ (lowercase__ : list[int] ) -> bool: '''simple docstring''' return len(set(lowercase__ ) ) == len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
668
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def _a ( a :Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: a = [] a = [] a = [] for rt in rc.restypes: a = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) a = {name: i for i, name in enumerate(a )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) a = torch.tensor( a , dtype=torch.intaa , device=protein['''aatype'''].device , ) a = torch.tensor( a , dtype=torch.intaa , device=protein['''aatype'''].device , ) a = torch.tensor( a , dtype=torch.floataa , device=protein['''aatype'''].device , ) a = protein['''aatype'''].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein a = restype_atomaa_to_atomaa[protein_aatype] a = restype_atomaa_mask[protein_aatype] a = residx_atomaa_mask a = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back a = restype_atomaa_to_atomaa[protein_aatype] a = residx_atomaa_to_atomaa.long() # create the corresponding mask a = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device ) for restype, restype_letter in enumerate(rc.restypes ): a = rc.restype_atoa[restype_letter] a = rc.residue_atoms[restype_name] for atom_name in atom_names: a = rc.atom_order[atom_name] a = 1 a = restype_atomaa_mask[protein_aatype] a = residx_atomaa_mask return protein def _a ( a :Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: a = tree_map(lambda a : torch.tensor(a , device=batch['''aatype'''].device ) , a , np.ndarray ) a = tensor_tree_map(lambda a : np.array(a ) , make_atomaa_masks(a ) ) return out
117
0
import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __magic_name__ = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ): snake_case__ = bnb_quantization_config.load_in_abit snake_case__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." 
) snake_case__ = [] # custom device map if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(device_map.keys() ) > 1: snake_case__ = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: snake_case__ = get_keys_to_not_convert(__lowerCAmelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__lowerCAmelCase ) snake_case__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: snake_case__ = [] snake_case__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__lowerCAmelCase ) # compatibility with peft snake_case__ = load_in_abit snake_case__ = load_in_abit snake_case__ = get_parameter_device(__lowerCAmelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." 
) snake_case__ = replace_with_bnb_layers(__lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase ) # convert param to the right dtype snake_case__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: snake_case__ = name.replace(".weight" , "" ).replace(".bias" , "" ) snake_case__ = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__lowerCAmelCase ): param.to(__lowerCAmelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" "We move the model to cuda." 
) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): snake_case__ = replace_with_bnb_layers( __lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase ) snake_case__ = get_quantized_model_device_map( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_memory=__lowerCAmelCase , no_split_module_classes=__lowerCAmelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): snake_case__ = True snake_case__ = any(x in list(device_map.values() ) for x in ["cpu", "disk"] ) load_checkpoint_in_model( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCAmelCase , offload_state_dict=__lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__lowerCAmelCase , device_map=__lowerCAmelCase , offload_dir=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None ): if device_map is None: if torch.cuda.is_available(): snake_case__ = {"": torch.cuda.current_device()} else: raise RuntimeError("No GPU found. A GPU is needed for quantization." ) logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." 
) snake_case__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) snake_case__ = {} snake_case__ = special_dtypes snake_case__ = no_split_module_classes snake_case__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": snake_case__ = get_balanced_memory( __lowerCAmelCase , low_zero=(device_map == "balanced_low_0") , max_memory=__lowerCAmelCase , **__lowerCAmelCase , ) snake_case__ = max_memory snake_case__ = infer_auto_device_map(__lowerCAmelCase , **__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # check if don't have any quantized module on the cpu snake_case__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules snake_case__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " ) else: logger.info( "Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit" ) del device_map_without_some_modules return device_map def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None ): if modules_to_not_convert is None: snake_case__ = [] snake_case__ , snake_case__ = _replace_with_bnb_layers( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , ): snake_case__ = False for name, module in model.named_children(): if current_key_name is None: snake_case__ = [] current_key_name.append(__lowerCAmelCase ) if isinstance(__lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` snake_case__ = ".".join(__lowerCAmelCase ) snake_case__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: snake_case__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: snake_case__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: snake_case__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False" ) snake_case__ = module.weight.data if module.bias is not None: snake_case__ = module.bias.data bnb_module.requires_grad_(__lowerCAmelCase ) setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) snake_case__ = True if len(list(module.children() ) ) > 0: snake_case__ , snake_case__ = _replace_with_bnb_layers( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) snake_case__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): # Create a copy of the model with init_empty_weights(): snake_case__ = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` snake_case__ = find_tied_parameters(__lowerCAmelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__lowerCAmelCase , __lowerCAmelCase ): snake_case__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: snake_case__ = sum(__lowerCAmelCase , [] ) snake_case__ = len(__lowerCAmelCase ) > 0 # Check if it is a base model snake_case__ = False if hasattr(__lowerCAmelCase , 
"base_model_prefix" ): snake_case__ = not hasattr(__lowerCAmelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head snake_case__ = list(model.named_children() ) snake_case__ = [list_modules[-1][0]] # add last module together with tied weights snake_case__ = set(__lowerCAmelCase ) - set(__lowerCAmelCase ) snake_case__ = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase ) # remove ".weight" from the keys snake_case__ = [".weight", ".bias"] snake_case__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: snake_case__ = name.replace(__lowerCAmelCase , "" ) filtered_module_names.append(__lowerCAmelCase ) return filtered_module_names def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): for m in model.modules(): if isinstance(__lowerCAmelCase , bnb.nn.Linearabit ): return True return False def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): return next(parameter.parameters() ).device def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , 0 , dtype=__lowerCAmelCase , value=__lowerCAmelCase ) snake_case__ = param_name snake_case__ = model if "." in tensor_name: snake_case__ = tensor_name.split("." 
) for split in splits[:-1]: snake_case__ = getattr(__lowerCAmelCase , __lowerCAmelCase ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) snake_case__ = new_module snake_case__ = splits[-1] # offload weights snake_case__ = False offload_weight(module._parameters[tensor_name] , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase ) if hasattr(module._parameters[tensor_name] , "SCB" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __lowerCAmelCase , index=__lowerCAmelCase , ) else: offload_weight(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase ) offload_weight(__lowerCAmelCase , param_name.replace("weight" , "SCB" ) , __lowerCAmelCase , index=__lowerCAmelCase ) set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , "meta" , dtype=__lowerCAmelCase , value=torch.empty(*param.size() ) )
530
import os import re import shutil import sys import tempfile import unittest import black __magic_name__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. __magic_name__ = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def A_ ( self ): snake_case__ = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) ) snake_case__ = self.transformer_dir shutil.copy( os.path.join(lowerCamelCase , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , ) def A_ ( self ): snake_case__ = "src/transformers" shutil.rmtree(self.transformer_dir ) def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ): snake_case__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: snake_case__ = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result snake_case__ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 ) snake_case__ = black.format_str(lowerCamelCase , mode=lowerCamelCase ) 
snake_case__ = os.path.join(self.transformer_dir , "new_code.py" ) with open(lowerCamelCase , "w" , newline="\n" ) as f: f.write(lowerCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase ) with open(lowerCamelCase , "r" ) as f: self.assertTrue(f.read() , lowerCamelCase ) def A_ ( self ): snake_case__ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" ) self.assertEqual(lowerCamelCase , lowerCamelCase ) def A_ ( self ): # Base copy consistency self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , ) # With no empty line at the end self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , lowerCamelCase , ) # Copy consistency with rename self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , lowerCamelCase ) , ) # Copy consistency with a really long name snake_case__ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" self.check_copy_consistency( F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub("Bert" , lowerCamelCase , lowerCamelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , lowerCamelCase , overwrite_result=re.sub("Bert" , "TestModel" , lowerCamelCase ) , ) def A_ ( self ): snake_case__ = check_copies.LOCALIZED_READMES["README_zh-hans.md"] snake_case__ = ( "1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace)," " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**" " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders" " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang" " Luong, Quoc V. Le, Christopher D. Manning." ) snake_case__ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) snake_case__ = ( "1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1." " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文" " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and" " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same" " method has been applied to compress GPT2 into" " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into" " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation)," " Multilingual BERT into" " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German" " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自" " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather" " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le," " Christopher D. Manning 发布。\n" ) snake_case__ , snake_case__ = check_copies.convert_to_localized_md( lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] ) self.assertFalse(lowerCamelCase ) self.assertEqual(lowerCamelCase , lowerCamelCase ) snake_case__ , snake_case__ = check_copies.convert_to_localized_md( lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(lowerCamelCase ) snake_case__ = ( "1. 
**[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the" " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for" " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong" " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut." ) snake_case__ = ( "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and" " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) snake_case__ = ( "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the" " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of" " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian" " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n" ) snake_case__ , snake_case__ = check_copies.convert_to_localized_md( lowerCamelCase , lowerCamelCase , localized_readme["format_model_list"] ) # Check if the model link is synchronized. self.assertEqual(lowerCamelCase , lowerCamelCase )
530
1
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input lowerCAmelCase_ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. 
Should always be ran first on your machine''' def lowerCamelCase_ ( ) -> int: """simple docstring""" snake_case_ : Optional[Any] = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: snake_case_ : Union[str, Any] = get_sagemaker_input() else: snake_case_ : Union[str, Any] = get_cluster_input() return config def lowerCamelCase_ ( _UpperCamelCase=None ) -> Optional[int]: """simple docstring""" if subparsers is not None: snake_case_ : Tuple = subparsers.add_parser('''config''' , description=_UpperCamelCase ) else: snake_case_ : Tuple = argparse.ArgumentParser('''Accelerate config command''' , description=_UpperCamelCase ) parser.add_argument( '''--config_file''' , default=_UpperCamelCase , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_UpperCamelCase ) return parser def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = get_user_input() if args.config_file is not None: snake_case_ : Tuple = args.config_file else: if not os.path.isdir(_UpperCamelCase ): os.makedirs(_UpperCamelCase ) snake_case_ : Tuple = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_UpperCamelCase ) else: config.to_yaml_file(_UpperCamelCase ) print(f'''accelerate configuration saved at {config_file}''' ) def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Union[str, Any] = config_command_parser() snake_case_ : Any = parser.parse_args() 
config_command(_UpperCamelCase ) if __name__ == "__main__": main()
60
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return getitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" return setitem, k, v def lowerCamelCase_ ( _UpperCamelCase ) -> Tuple: """simple docstring""" return delitem, k def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) -> str: """simple docstring""" try: return fun(_UpperCamelCase , *_UpperCamelCase ), None except Exception as e: return None, e lowerCAmelCase_ = ( _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), ) lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_a''', '''val_b'''), ] lowerCAmelCase_ = [ _set('''key_a''', '''val_a'''), _set('''key_b''', '''val_b'''), _del('''key_a'''), _del('''key_b'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), ] lowerCAmelCase_ = [ _get('''key_a'''), _del('''key_a'''), _set('''key_a''', '''val_a'''), _del('''key_a'''), _del('''key_a'''), _get('''key_a'''), ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] lowerCAmelCase_ = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set('''key_a''', '''val_b'''), ] @pytest.mark.parametrize( '''operations''' , ( pytest.param(_add_items , id='''add items''' ), pytest.param(_overwrite_items , id='''overwrite items''' ), pytest.param(_delete_items , id='''delete items''' ), pytest.param(_access_absent_items , id='''access absent items''' ), pytest.param(_add_with_resize_up , id='''add with resize up''' ), pytest.param(_add_with_resize_down , id='''add with resize down''' ), ) , ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" snake_case_ : Any = HashMap(initial_block_size=4 ) snake_case_ : Union[str, Any] = {} for _, (fun, *args) in enumerate(_UpperCamelCase ): snake_case_ , snake_case_ : str = 
_run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) snake_case_ , snake_case_ : List[Any] = _run_operation(_UpperCamelCase , _UpperCamelCase , *_UpperCamelCase ) assert my_res == py_res assert str(_UpperCamelCase ) == str(_UpperCamelCase ) assert set(_UpperCamelCase ) == set(_UpperCamelCase ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) assert set(my.items() ) == set(py.items() ) def lowerCamelCase_ ( ) -> Any: """simple docstring""" def is_public(_UpperCamelCase ) -> bool: return not name.startswith('''_''' ) snake_case_ : str = {name for name in dir({} ) if is_public(_UpperCamelCase )} snake_case_ : str = {name for name in dir(HashMap() ) if is_public(_UpperCamelCase )} assert dict_public_names > hash_public_names
60
1
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] ) -> Optional[int]: # picklable for multiprocessing """simple docstring""" return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]: """simple docstring""" with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" SCREAMING_SNAKE_CASE__ = [1, 2, 3] with pytest.raises(__UpperCamelCase ): with parallel_backend("""unsupported backend""" ): map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=2 ) with pytest.raises(__UpperCamelCase ): with parallel_backend("""unsupported backend""" ): map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [1, 2] SCREAMING_SNAKE_CASE__ = {"""a""": 1, """b""": 2} SCREAMING_SNAKE_CASE__ = {"""a""": [1, 2], """b""": [3, 4]} SCREAMING_SNAKE_CASE__ = {"""a""": {"""1""": 1}, """b""": 2} SCREAMING_SNAKE_CASE__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} SCREAMING_SNAKE_CASE__ = [2, 3] SCREAMING_SNAKE_CASE__ = {"""a""": 2, """b""": 3} SCREAMING_SNAKE_CASE__ = {"""a""": [2, 3], """b""": [4, 5]} SCREAMING_SNAKE_CASE__ = {"""a""": {"""1""": 2}, """b""": 3} SCREAMING_SNAKE_CASE__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , 
__UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa assert map_nested(__UpperCamelCase , __UpperCamelCase , num_proc=__UpperCamelCase ) == expected_map_nested_sa
379
from math import log from scipy.constants import Boltzmann, physical_constants __lowerCamelCase : int = 300 # TEMPERATURE (unit = K) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : float , __UpperCamelCase : float , __UpperCamelCase : float , ) -> float: """simple docstring""" if donor_conc <= 0: raise ValueError("""Donor concentration should be positive""" ) elif acceptor_conc <= 0: raise ValueError("""Acceptor concentration should be positive""" ) elif intrinsic_conc <= 0: raise ValueError("""Intrinsic concentration should be positive""" ) elif donor_conc <= intrinsic_conc: raise ValueError( """Donor concentration should be greater than intrinsic concentration""" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( """Acceptor concentration should be greater than intrinsic concentration""" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
379
1
'''simple docstring''' from __future__ import annotations import bisect def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int = 0 , _lowerCamelCase : int = -1 ): if hi < 0: A__ = len(_lowerCamelCase ) while lo < hi: A__ = lo + (hi - lo) // 2 if sorted_collection[mid] < item: A__ = mid + 1 else: A__ = mid return lo def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int = 0 , _lowerCamelCase : int = -1 ): if hi < 0: A__ = len(_lowerCamelCase ) while lo < hi: A__ = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: A__ = mid + 1 else: A__ = mid return lo def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int = 0 , _lowerCamelCase : int = -1 ): sorted_collection.insert(bisect_left(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int = 0 , _lowerCamelCase : int = -1 ): sorted_collection.insert(bisect_right(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ): A__ = 0 A__ = len(_lowerCamelCase ) - 1 while left <= right: A__ = left + (right - left) // 2 A__ = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: A__ = midpoint - 1 else: A__ = midpoint + 1 return None def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int ): A__ = bisect.bisect_left(_lowerCamelCase , _lowerCamelCase ) if index != len(_lowerCamelCase ) and sorted_collection[index] == item: return index return None def UpperCamelCase ( _lowerCamelCase : list[int] , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ): if right < left: return None A__ = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif 
sorted_collection[midpoint] > item: return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , midpoint - 1 ) else: return binary_search_by_recursion(_lowerCamelCase , _lowerCamelCase , midpoint + 1 , _lowerCamelCase ) if __name__ == "__main__": __lowerCAmelCase : List[Any] =input("Enter numbers separated by comma:\n").strip() __lowerCAmelCase : int =sorted(int(item) for item in user_input.split(",")) __lowerCAmelCase : Dict =int(input("Enter a single number to be found in the list:\n")) __lowerCAmelCase : Union[str, Any] =binary_search(collection, target) if result is None: print(f"""{target} was not found in {collection}.""") else: print(f"""{target} was found at position {result} in {collection}.""")
440
'''simple docstring''' import string from math import logaa def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : str ): A__ = document.translate( str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" ) A__ = document_without_punctuation.split(" " ) # word tokenization return len([word for word in tokenize_document if word.lower() == term.lower()] ) def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : str ): A__ = corpus.lower().translate( str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with '' A__ = corpus_without_punctuation.split("\n" ) A__ = term.lower() return (len([doc for doc in docs if term in doc] ), len(_lowerCamelCase )) def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Tuple=False ): if smoothing: if n == 0: raise ValueError("log10(0) is undefined." ) return round(1 + logaa(n / (1 + df) ) , 3 ) if df == 0: raise ZeroDivisionError("df must be > 0" ) elif n == 0: raise ValueError("log10(0) is undefined." ) return round(logaa(n / df ) , 3 ) def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int ): return round(tf * idf , 3 )
440
1
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 SCREAMING_SNAKE_CASE_ = sys.version_info >= (3, 1_0) def __lowercase ( _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ) -> int: '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : int __snake_case : float __snake_case : str __snake_case : bool @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : int = 42 __snake_case : str = field(default="toto" , metadata={"help": "help message"} ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : bool = False __snake_case : bool = True __snake_case : Optional[bool] = None class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : Dict = "titi" __snake_case : Optional[int] = "toto" class UpperCamelCase__ ( lowerCAmelCase_ ): '''simple docstring''' __snake_case : str = "titi" __snake_case : Optional[Any] = "toto" __snake_case : Tuple = 42 @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : BasicEnum = "toto" def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = BasicEnum(self.foo ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : MixedTypeEnum = "toto" def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = MixedTypeEnum(self.foo ) @dataclass class UpperCamelCase__ : '''simple 
docstring''' __snake_case : Optional[int] = None __snake_case : Optional[float] = field(default=lowerCAmelCase_ , metadata={"help": "help message"} ) __snake_case : Optional[str] = None __snake_case : Optional[List[str]] = list_field(default=[] ) __snake_case : Optional[List[int]] = list_field(default=[] ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[int] = list_field(default=[] ) __snake_case : List[int] = list_field(default=[1, 2, 3] ) __snake_case : List[str] = list_field(default=["Hallo", "Bonjour", "Hello"] ) __snake_case : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : List[int] = field() __snake_case : str = field() __snake_case : BasicEnum = field() def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = BasicEnum(self.required_enum ) @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : int __snake_case : "BasicEnum" = field() __snake_case : "Optional[bool]" = None __snake_case : "str" = field(default="toto" , metadata={"help": "help message"} ) __snake_case : "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"] ) if is_python_no_less_than_3_10: @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : bool = False __snake_case : bool = True __snake_case : bool | None = None @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : int | None = None __snake_case : float | None = field(default=lowerCAmelCase_ , metadata={"help": "help message"} ) __snake_case : str | None = None __snake_case : list[str] | None = list_field(default=[] ) __snake_case : list[int] | None = list_field(default=[] ) class UpperCamelCase__ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE__ ( self : List[str] ,lowerCamelCase__ : argparse.ArgumentParser ,lowerCamelCase__ : argparse.ArgumentParser ) -> Tuple: '''simple docstring''' 
self.assertEqual(len(a._actions ) ,len(b._actions ) ) for x, y in zip(a._actions ,b._actions ): SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase__ ).items() if k != """container"""} SCREAMING_SNAKE_CASE = {k: v for k, v in vars(lowerCamelCase__ ).items() if k != """container"""} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get("""choices""" ,lowerCamelCase__ ) and yy.get("""choices""" ,lowerCamelCase__ ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx["""type"""](lowerCamelCase__ ) ,yy["""type"""](lowerCamelCase__ ) ) del xx["type"], yy["type"] self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument("""--bar""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument("""--baz""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument("""--flag""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,const=lowerCamelCase__ ,nargs="""?""" ) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""] ((SCREAMING_SNAKE_CASE), ) = parser.parse_args_into_dataclasses(lowerCamelCase__ ,look_for_args_file=lowerCamelCase__ ) self.assertFalse(example.flag ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=42 ,type=lowerCamelCase__ ) expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase__ ,help="""help message""" ) self.argparsersEqual(lowerCamelCase__ 
,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,const=lowerCamelCase__ ,nargs="""?""" ) expected.add_argument("""--baz""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,const=lowerCamelCase__ ,nargs="""?""" ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument("""--no_baz""" ,action="""store_false""" ,default=lowerCamelCase__ ,dest="""baz""" ) expected.add_argument("""--opt""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCamelCase__ ) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_args([] ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=lowerCamelCase__ ,baz=lowerCamelCase__ ,opt=lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--no_baz"""] ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=lowerCamelCase__ ,baz=lowerCamelCase__ ,opt=lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """--baz"""] ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=lowerCamelCase__ ,baz=lowerCamelCase__ ,opt=lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=lowerCamelCase__ ,baz=lowerCamelCase__ ,opt=lowerCamelCase__ ) ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=lowerCamelCase__ ,baz=lowerCamelCase__ ,opt=lowerCamelCase__ 
) ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=["""titi""", """toto""", 42] ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.toto ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.titi ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0] self.assertEqual(enum_ex.foo ,MixedTypeEnum.fourtytwo ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]: '''simple docstring''' @dataclass class UpperCamelCase__ : '''simple docstring''' __snake_case : Literal["titi", "toto", 42] = "toto" SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument( """--foo""" ,default="""toto""" ,choices=("""titi""", """toto""", 42) ,type=make_choice_type_function(["""titi""", """toto""", 42] ) ,) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_args([] ) self.assertEqual(args.foo ,"""toto""" ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """titi"""] ) self.assertEqual(args.foo ,"""titi""" ) SCREAMING_SNAKE_CASE = parser.parse_args(["""--foo""", """42"""] ) self.assertEqual(args.foo ,42 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[Any]: '''simple 
docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo_int""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase__ ) expected.add_argument("""--bar_int""" ,nargs="""+""" ,default=[1, 2, 3] ,type=lowerCamelCase__ ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase__ ) expected.add_argument("""--foo_float""" ,nargs="""+""" ,default=[0.1, 0.2, 0.3] ,type=lowerCamelCase__ ) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_args([] ) self.assertEqual( lowerCamelCase__ ,Namespace(foo_int=[] ,bar_int=[1, 2, 3] ,foo_str=["""Hallo""", """Bonjour""", """Hello"""] ,foo_float=[0.1, 0.2, 0.3] ) ,) SCREAMING_SNAKE_CASE = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() ) self.assertEqual(lowerCamelCase__ ,Namespace(foo_int=[1] ,bar_int=[2, 3] ,foo_str=["""a""", """b""", """c"""] ,foo_float=[0.1, 0.7] ) ) def SCREAMING_SNAKE_CASE__ ( self : int ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo""" ,default=lowerCamelCase__ ,type=lowerCamelCase__ ) expected.add_argument("""--bar""" ,default=lowerCamelCase__ ,type=lowerCamelCase__ ,help="""help message""" ) expected.add_argument("""--baz""" ,default=lowerCamelCase__ ,type=lowerCamelCase__ ) expected.add_argument("""--ces""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase__ ) expected.add_argument("""--des""" ,nargs="""+""" ,default=[] ,type=lowerCamelCase__ ) SCREAMING_SNAKE_CASE = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(lowerCamelCase__ ) for dataclass_type in dataclass_types: SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_args([] ) self.assertEqual(lowerCamelCase__ 
,Namespace(foo=lowerCamelCase__ ,bar=lowerCamelCase__ ,baz=lowerCamelCase__ ,ces=[] ,des=[] ) ) SCREAMING_SNAKE_CASE = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() ) self.assertEqual(lowerCamelCase__ ,Namespace(foo=12 ,bar=3.14 ,baz="""42""" ,ces=["""a""", """b""", """c"""] ,des=[1, 2, 3] ) ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--required_list""" ,nargs="""+""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument("""--required_str""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase__ ,) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = argparse.ArgumentParser() expected.add_argument("""--foo""" ,type=lowerCamelCase__ ,required=lowerCamelCase__ ) expected.add_argument( """--required_enum""" ,type=make_choice_type_function(["""titi""", """toto"""] ) ,choices=["""titi""", """toto"""] ,required=lowerCamelCase__ ,) expected.add_argument("""--opt""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ) expected.add_argument("""--baz""" ,default="""toto""" ,type=lowerCamelCase__ ,help="""help message""" ) expected.add_argument("""--foo_str""" ,nargs="""+""" ,default=["""Hallo""", """Bonjour""", """Hello"""] ,type=lowerCamelCase__ ) self.argparsersEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """foo""": 12, """bar""": 3.14, """baz""": """42""", 
"""flag""": True, } SCREAMING_SNAKE_CASE = parser.parse_dict(lowerCamelCase__ )[0] SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, """extra""": 42, } self.assertRaises(lowerCamelCase__ ,parser.parse_dict ,lowerCamelCase__ ,allow_extra_keys=lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""temp_json""" ) os.mkdir(lowerCamelCase__ ) with open(temp_local_path + """.json""" ,"""w+""" ) as f: json.dump(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.json""" ) )[0] SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) SCREAMING_SNAKE_CASE = { """foo""": 12, """bar""": 3.14, """baz""": """42""", """flag""": True, } with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ ,"""temp_yaml""" ) os.mkdir(lowerCamelCase__ ) with open(temp_local_path + """.yaml""" ,"""w+""" ) as f: yaml.dump(lowerCamelCase__ ,lowerCamelCase__ ) SCREAMING_SNAKE_CASE = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0] SCREAMING_SNAKE_CASE = BasicExample(**lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ ) def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str: 
'''simple docstring''' SCREAMING_SNAKE_CASE = HfArgumentParser(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ )
116
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return *price* increased by *tax_rate* (e.g. 0.25 for a 25% tax).

    Restores the intended signature: the obfuscated source declared two
    parameters both named ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) while the
    body referenced ``price`` and ``tax_rate``; the ``__main__`` driver below
    already calls ``price_plus_tax``, proving the intended function name.
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(1_0_0, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
116
1
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    """Safety checker that blacks out NSFW or watermarked images.

    Runs a CLIP vision tower over the images and feeds the pooled embedding
    through two linear heads (NSFW probability and watermark probability).
    Restored from obfuscated source: the original declared duplicate
    parameter names (a SyntaxError), referenced the unbound ``__UpperCamelCase``
    everywhere, and had corrupted the ``images[idx] = ...`` black-out
    assignments into throwaway locals. Public names (class, ``forward``,
    parameter order) follow the body's variable usage and the diffusers
    IF safety-checker pattern — confirm against callers.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        # Shared CLIP vision encoder feeding two scalar detection heads.
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW score
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark score

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Return ``(images, nsfw_detected, watermark_detected)``.

        Images whose NSFW or watermark score exceeds the corresponding
        threshold are replaced in-place with black (all-zero) arrays; the two
        returned flag lists hold one boolean per image.
        """
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black image

        return images, nsfw_detected, watermark_detected
12
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase ( __a ): def A_ (self ) -> Any: UpperCamelCase_ : Dict = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__UpperCamelCase , """num_attention_heads""" ) ) class UpperCamelCase : def __init__(self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=64 , __UpperCamelCase=3 , __UpperCamelCase=3 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=16 , __UpperCamelCase=[128, 256, 384] , __UpperCamelCase=[4, 6, 8] , __UpperCamelCase=[2, 3, 4] , __UpperCamelCase=[16, 16, 16] , __UpperCamelCase=0 , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=[2, 2, 2] , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=2 , ) -> Optional[int]: UpperCamelCase_ : Tuple = parent UpperCamelCase_ : Optional[Any] = batch_size UpperCamelCase_ : Dict = image_size UpperCamelCase_ : Dict = num_channels UpperCamelCase_ : Optional[Any] = kernel_size UpperCamelCase_ : int = stride UpperCamelCase_ : str = padding UpperCamelCase_ : 
Tuple = hidden_sizes UpperCamelCase_ : int = num_attention_heads UpperCamelCase_ : List[str] = depths UpperCamelCase_ : Dict = key_dim UpperCamelCase_ : Any = drop_path_rate UpperCamelCase_ : List[Any] = patch_size UpperCamelCase_ : Any = attention_ratio UpperCamelCase_ : Optional[Any] = mlp_ratio UpperCamelCase_ : Optional[int] = initializer_range UpperCamelCase_ : Optional[Any] = [ ["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] UpperCamelCase_ : Tuple = is_training UpperCamelCase_ : Any = use_labels UpperCamelCase_ : Dict = num_labels UpperCamelCase_ : List[str] = initializer_range def A_ (self ) -> Dict: UpperCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase_ : List[str] = None if self.use_labels: UpperCamelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase_ : Any = self.get_config() return config, pixel_values, labels def A_ (self ) -> Optional[int]: return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> List[Any]: UpperCamelCase_ : int = LevitModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() UpperCamelCase_ : List[Any] = model(__UpperCamelCase ) UpperCamelCase_ : int = (self.image_size, self.image_size) UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1] for _ in range(4 ): UpperCamelCase_ : Union[str, Any] = 
floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) UpperCamelCase_ : List[Any] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Any: UpperCamelCase_ : List[str] = self.num_labels UpperCamelCase_ : Any = LevitForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() UpperCamelCase_ : int = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ (self ) -> str: UpperCamelCase_ : Tuple = self.prepare_config_and_inputs() UpperCamelCase_,UpperCamelCase_,UpperCamelCase_ : Any = config_and_inputs UpperCamelCase_ : Tuple = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( __a , __a , unittest.TestCase ): a__ :Any = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) a__ :str = ( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ :Optional[Any] = False a__ :Optional[int] = False a__ :Tuple = False a__ :List[str] = False a__ :Dict = False def A_ (self ) -> List[Any]: UpperCamelCase_ : int = LevitModelTester(self ) UpperCamelCase_ : Union[str, Any] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 ) def A_ (self ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() 
self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A_ (self ) -> Optional[Any]: return @unittest.skip(reason="""Levit does not use inputs_embeds""" ) def A_ (self ) -> int: pass @unittest.skip(reason="""Levit does not support input and output embeddings""" ) def A_ (self ) -> Any: pass @unittest.skip(reason="""Levit does not output attentions""" ) def A_ (self ) -> List[str]: pass def A_ (self ) -> Union[str, Any]: UpperCamelCase_,UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : str = model_class(__UpperCamelCase ) UpperCamelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ : Dict = [*signature.parameters.keys()] UpperCamelCase_ : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def A_ (self ) -> Optional[Any]: def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): UpperCamelCase_ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): UpperCamelCase_ : Union[str, Any] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) UpperCamelCase_ : Optional[Any] = outputs.hidden_states UpperCamelCase_ : Optional[int] = len(self.model_tester.depths ) + 1 self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) UpperCamelCase_ : Tuple = (self.model_tester.image_size, self.model_tester.image_size) UpperCamelCase_,UpperCamelCase_ : Optional[int] = image_size[0], image_size[1] for _ in range(4 ): UpperCamelCase_ : Dict = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) UpperCamelCase_ : List[str] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify 
the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ : Union[str, Any] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase_ : List[str] = True check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def A_ (self ) -> Dict: pass def A_ (self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> Tuple: UpperCamelCase_ : List[str] = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def A_ (self ) -> Tuple: UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def A_ (self ) -> List[Any]: UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def A_ (self ) -> Optional[Any]: if not self.model_tester.is_training: return UpperCamelCase_,UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase_ : List[Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__UpperCamelCase ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue UpperCamelCase_ : int = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) 
model.train() UpperCamelCase_ : List[str] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) UpperCamelCase_ : int = model(**__UpperCamelCase ).loss loss.backward() def A_ (self ) -> Union[str, Any]: UpperCamelCase_,UpperCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCamelCase_ : Tuple = False UpperCamelCase_ : str = True for model_class in self.all_model_classes: if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue UpperCamelCase_ : str = model_class(__UpperCamelCase ) model.gradient_checkpointing_enable() model.to(__UpperCamelCase ) model.train() UpperCamelCase_ : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) UpperCamelCase_ : Optional[Any] = model(**__UpperCamelCase ).loss loss.backward() def A_ (self ) -> Union[str, Any]: UpperCamelCase_,UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase_ : Any = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__UpperCamelCase ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ): UpperCamelCase_ : Any = problem_type["""title"""] UpperCamelCase_ : Dict = problem_type["""num_labels"""] UpperCamelCase_ : Dict = 
model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.train() UpperCamelCase_ : Optional[int] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if problem_type["num_labels"] > 1: UpperCamelCase_ : Union[str, Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) UpperCamelCase_ : Tuple = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list: UpperCamelCase_ : str = model(**__UpperCamelCase ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'''Something is going wrong in the regression problem: intercepted {w.message}''' ) loss.backward() @slow def A_ (self ) -> Dict: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ : Any = LevitModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowerCAmelCase_ ( ): UpperCamelCase_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def A_ (self ) -> Any: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A_ (self ) -> str: UpperCamelCase_ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __UpperCamelCase ) UpperCamelCase_ : Optional[Any] = self.default_image_processor UpperCamelCase_ : List[str] = prepare_img() UpperCamelCase_ : Optional[int] = 
image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): UpperCamelCase_ : Union[str, Any] = model(**__UpperCamelCase ) # verify the logits UpperCamelCase_ : List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) UpperCamelCase_ : Any = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1E-4 ) )
635
0
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase__( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" _A = VideoToVideoSDPipeline _A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} _A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} _A = PipelineTesterMixin.required_optional_params - {"latents"} _A = False # No `output_type`. _A = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def _a ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) A =UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) A =DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , ) torch.manual_seed(0 ) A =AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) 
torch.manual_seed(0 ) A =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) A =CLIPTextModel(snake_case__ ) A =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A ={ "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def _a ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : Tuple=0 ): """simple docstring""" A =floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) if str(snake_case__ ).startswith("mps" ): A =torch.manual_seed(snake_case__ ) else: A =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) A ={ "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def _a ( self : Optional[Any] ): """simple docstring""" A ="cpu" # ensure determinism for the device-dependent torch.Generator A =self.get_dummy_components() A =VideoToVideoSDPipeline(**snake_case__ ) A =sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) A =self.get_dummy_inputs(snake_case__ ) A ="np" A =sd_pipe(**snake_case__ ).frames A =frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) A =np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def _a ( self : Optional[Any] ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ , expected_max_diff=5E-3 ) @unittest.skip(reason="Batching needs to be properly figured 
out first for this pipeline." ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def _a ( self : Any ): """simple docstring""" pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def _a ( self : Tuple ): """simple docstring""" pass def _a ( self : Dict ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class UpperCamelCase__( unittest.TestCase ): """simple docstring""" def _a ( self : Dict ): """simple docstring""" A =VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames A =torch.Generator(device="cpu" ).manual_seed(0 ) A =torch.randn((1, 10, 3, 10_24, 5_76) , generator=snake_case__ ) A =video.to("cuda" ) A ="Spiderman is surfing" A =pipe(snake_case__ , video=snake_case__ , generator=snake_case__ , num_inference_steps=3 , output_type="pt" ).frames A =np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
702
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def UpperCamelCase_ ( a_ ) ->Tuple: A =FileLock(str(tmpdir / "foo.lock" ) ) A =FileLock(str(tmpdir / "foo.lock" ) ) A =0.01 with locka.acquire(): with pytest.raises(a_ ): A =time.time() locka.acquire(a_ ) assert time.time() - _start > timeout def UpperCamelCase_ ( a_ ) ->List[Any]: A ="a" * 1000 + ".lock" A =FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(a_ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 A =FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(a_ ): locka.acquire(0 )
689
0
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __magic_name__ = abspath(join(dirname(dirname(dirname(__file__))), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def _lowerCAmelCase ( A__: List[str] ): '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(A__ ) def _lowerCAmelCase ( A__: Tuple ): '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main UpperCAmelCase = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(A__ , id=A__ )
254
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowercase ( A__ ): '''simple docstring''' def __init__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = True , _snake_case = None , _snake_case = False , _snake_case = None , _snake_case = True , _snake_case = "arrow" , **_snake_case , ) -> Union[str, Any]: """simple docstring""" super().__init__( split=_snake_case , features=_snake_case , cache_dir=_snake_case , keep_in_memory=_snake_case , streaming=_snake_case , **_snake_case , ) UpperCAmelCase = load_from_cache_file UpperCAmelCase = file_format UpperCAmelCase = Spark( df=_snake_case , features=_snake_case , cache_dir=_snake_case , working_dir=_snake_case , **_snake_case , ) def snake_case_ ( self ) -> Union[str, Any]: """simple docstring""" if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) UpperCAmelCase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_snake_case , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
254
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class UpperCamelCase_ ( _lowerCamelCase ): lowerCAmelCase_ = ['''image_processor''', '''tokenizer'''] lowerCAmelCase_ = '''ChineseCLIPImageProcessor''' lowerCAmelCase_ = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> int: _snake_case = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , lowerCAmelCase_ , ) _snake_case = kwargs.pop('feature_extractor' ) _snake_case = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = self.image_processor def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ) -> Optional[Any]: if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' 
) if text is not None: _snake_case = self.tokenizer(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if images is not None: _snake_case = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None and images is not None: _snake_case = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ ) def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict: return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def lowerCAmelCase ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> List[Any]: return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property def lowerCAmelCase ( self ) -> Optional[int]: _snake_case = self.tokenizer.model_input_names _snake_case = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCAmelCase ( self ) -> Any: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase_ , ) return self.image_processor_class
704
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() UpperCAmelCase_ = 2 class UpperCamelCase_ : def __init__( self , *, # begin keyword-only arguments lowerCAmelCase_="<s>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_=None , ) -> Optional[int]: _snake_case , _snake_case , _snake_case , _snake_case = bos, unk, pad, eos _snake_case = [] _snake_case = [] _snake_case = {} _snake_case = self.add_symbol(lowerCAmelCase_ ) _snake_case = self.add_symbol(lowerCAmelCase_ ) _snake_case = self.add_symbol(lowerCAmelCase_ ) _snake_case = self.add_symbol(lowerCAmelCase_ ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(lowerCAmelCase_ ) _snake_case = len(self.symbols ) def __eq__( self , lowerCAmelCase_ ) -> List[Any]: return self.indices == other.indices def __getitem__( self , lowerCAmelCase_ ) -> Dict: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ) -> Optional[Any]: return len(self.symbols ) def __contains__( self , lowerCAmelCase_ ) -> int: return sym in self.indices @classmethod def lowerCAmelCase ( cls , lowerCAmelCase_ ) -> List[Any]: _snake_case = cls() d.add_from_file(lowerCAmelCase_ ) return d def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False ) -> str: if word in self.indices and not overwrite: _snake_case = self.indices[word] _snake_case = self.count[idx] + n return idx else: _snake_case = len(self.symbols ) _snake_case = idx self.symbols.append(lowerCAmelCase_ ) self.count.append(lowerCAmelCase_ ) return idx def lowerCAmelCase ( self , lowerCAmelCase_ ) -> Optional[Any]: return 0 def lowerCAmelCase ( 
self , lowerCAmelCase_ ) -> Union[str, Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): try: with open(lowerCAmelCase_ , 'r' , encoding='utf-8' ) as fd: self.add_from_file(lowerCAmelCase_ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowerCAmelCase_ ) ) return _snake_case = f.readlines() _snake_case = self._load_meta(lowerCAmelCase_ ) for line in lines[indices_start_line:]: try: _snake_case , _snake_case = line.rstrip().rsplit(' ' , 1 ) if field == "#fairseq:overwrite": _snake_case = True _snake_case , _snake_case = line.rsplit(' ' , 1 ) else: _snake_case = False _snake_case = int(lowerCAmelCase_ ) _snake_case = line if word in self and not overwrite: raise RuntimeError( 'Duplicate word found when loading Dictionary: \'{}\'. ' 'Duplicate words can overwrite earlier ones by adding the ' '#fairseq:overwrite flag at the end of the corresponding row ' 'in the dictionary file. 
If using the Camembert model, please ' 'download an updated copy of the model file.'.format(lowerCAmelCase_ ) ) self.add_symbol(lowerCAmelCase_ , n=lowerCAmelCase_ , overwrite=lowerCAmelCase_ ) except ValueError: raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' ) def lowerCamelCase__ ( UpperCamelCase__ : Union[str, Any] ) -> Tuple: '''simple docstring''' _snake_case = dict((re.sub(R'@@$' , '' , UpperCamelCase__ ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , UpperCamelCase__ ), v) for k, v in d.items() ) _snake_case = '<s> <pad> </s> <unk>'.split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] _snake_case = d[k] # restore return da def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ) -> List[str]: '''simple docstring''' if not os.path.exists(UpperCamelCase__ ): raise ValueError(F'''path {biogpt_checkpoint_path} does not exist!''' ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models _snake_case = os.path.join(UpperCamelCase__ , 'checkpoint.pt' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'''path to the file {checkpoint_file} does not exist!''' ) _snake_case = torch.load(UpperCamelCase__ , map_location='cpu' ) _snake_case = chkpt['cfg']['model'] # dicts _snake_case = os.path.join(UpperCamelCase__ , 'dict.txt' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'''path to the file {dict_file} does not exist!''' ) _snake_case = Dictionary.load(UpperCamelCase__ ) _snake_case = rewrite_dict_keys(src_dict.indices ) _snake_case = len(UpperCamelCase__ ) _snake_case = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['vocab_file'] ) print(F'''Generating {src_vocab_file} of {src_vocab_size} records''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , 
indent=UpperCamelCase__ ) ) # merges_file (bpecodes) _snake_case = os.path.join(UpperCamelCase__ , 'bpecodes' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'''path to the file {bpecodes_file} does not exist!''' ) _snake_case = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['merges_file'] ) shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ ) # model config _snake_case = os.path.join(UpperCamelCase__ , 'config.json' ) _snake_case = { 'activation_dropout': args['activation_dropout'], 'architectures': ['BioGptForCausalLM'], 'attention_probs_dropout_prob': args['attention_dropout'], 'bos_token_id': 0, 'eos_token_id': 2, 'hidden_act': args['activation_fn'], 'hidden_dropout_prob': args['dropout'], 'hidden_size': args['decoder_embed_dim'], 'initializer_range': 0.02, 'intermediate_size': args['decoder_ffn_embed_dim'], 'layer_norm_eps': 1e-12, 'layerdrop': args['decoder_layerdrop'], 'max_position_embeddings': args['max_target_positions'], 'model_type': 'biogpt', 'num_attention_heads': args['decoder_attention_heads'], 'num_hidden_layers': args['decoder_layers'], 'pad_token_id': 1, 'scale_embedding': not args['no_scale_embedding'], 'tie_word_embeddings': args['share_decoder_input_output_embed'], 'vocab_size': src_vocab_size, } # good hparam defaults to start with print(F'''Generating {biogpt_model_config_file}''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # tokenizer config _snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = { 'bos_token': '<s>', 'eos_token': '</s>', 'model_max_length': 1_024, 'pad_token': '<pad>', 'special_tokens_map_file': None, 'tokenizer_class': 'BioGptTokenizer', 'unk_token': '<unk>', } print(F'''Generating {biogpt_tokenizer_config_file}''' ) with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ 
) ) # model _snake_case = chkpt['model'] # remove unneeded keys _snake_case = [ 'decoder.version', ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('output_projection.weight' ): _snake_case = model_state_dict.pop(UpperCamelCase__ ) else: _snake_case = model_state_dict.pop(UpperCamelCase__ ) _snake_case = BioGptConfig.from_pretrained(UpperCamelCase__ ) _snake_case = BioGptForCausalLM(UpperCamelCase__ ) # check that it loads ok model_new.load_state_dict(UpperCamelCase__ ) # save _snake_case = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) print('Conversion is done!' ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase_ = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
541
0
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase_ = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n" UpperCAmelCase_ = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. 
This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n" UpperCAmelCase_ = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... 
]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def UpperCamelCase__ ( self ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=4 , UpperCamelCase=False ) -> List[Any]: __a = compute_bleu( reference_corpus=UpperCamelCase , translation_corpus=UpperCamelCase , max_order=UpperCamelCase , smooth=UpperCamelCase ) ((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
539
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ = { "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"], "tokenization_deberta": ["DebertaTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = ["DebertaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForMaskedLM", "DebertaForQuestionAnswering", "DebertaForSequenceClassification", "DebertaForTokenClassification", "DebertaModel", "DebertaPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ = [ "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDebertaForMaskedLM", "TFDebertaForQuestionAnswering", "TFDebertaForSequenceClassification", "TFDebertaForTokenClassification", "TFDebertaModel", "TFDebertaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
539
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowerCAmelCase_ ( lowercase_ : Tuple ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowerCAmelCase_ ( lowercase_ : Any , lowercase_ : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Optional[Any] = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __SCREAMING_SNAKE_CASE : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __SCREAMING_SNAKE_CASE : Tuple = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __SCREAMING_SNAKE_CASE : Optional[int] = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __SCREAMING_SNAKE_CASE : str = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __SCREAMING_SNAKE_CASE : List[Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) 
__SCREAMING_SNAKE_CASE : List[str] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __SCREAMING_SNAKE_CASE : Any = key.replace('''text_projection''' , '''flava.text_projection''' ) __SCREAMING_SNAKE_CASE : Tuple = key.replace('''image_projection''' , '''flava.image_projection''' ) __SCREAMING_SNAKE_CASE : Tuple = value.float() for key, value in codebook_state_dict.items(): __SCREAMING_SNAKE_CASE : Union[str, Any] = value return upgrade @torch.no_grad() def lowerCAmelCase_ ( lowercase_ : Any , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Tuple=None ): '''simple docstring''' if config_path is not None: __SCREAMING_SNAKE_CASE : Dict = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = FlavaConfig() __SCREAMING_SNAKE_CASE : Optional[Any] = FlavaForPreTraining(SCREAMING_SNAKE_CASE__ ).eval() __SCREAMING_SNAKE_CASE : str = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , save_checkpoint=SCREAMING_SNAKE_CASE__ ) if os.path.exists(SCREAMING_SNAKE_CASE__ ): __SCREAMING_SNAKE_CASE : str = torch.load(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' ) __SCREAMING_SNAKE_CASE : Tuple = upgrade_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE : Any = hf_model.state_dict() __SCREAMING_SNAKE_CASE : Any = count_parameters(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE : int = count_parameters(SCREAMING_SNAKE_CASE__ ) + count_parameters(SCREAMING_SNAKE_CASE__ ) assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": _lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output 
PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') _lowerCamelCase = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
704
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def lowerCAmelCase_ ( lowercase_ : dict ): '''simple docstring''' return (data["data"], data["target"]) def lowerCAmelCase_ ( lowercase_ : np.ndarray , lowercase_ : np.ndarray ): '''simple docstring''' __SCREAMING_SNAKE_CASE : Union[str, Any] = XGBClassifier() classifier.fit(lowercase_ , lowercase_ ) return classifier def lowerCAmelCase_ ( ): '''simple docstring''' __SCREAMING_SNAKE_CASE : List[str] = load_iris() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = data_handling(lowercase_ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = train_test_split( lowercase_ , lowercase_ , test_size=0.25 ) __SCREAMING_SNAKE_CASE : Any = iris['''target_names'''] # Create an XGBoost Classifier from the training data __SCREAMING_SNAKE_CASE : Union[str, Any] = xgboost(lowercase_ , lowercase_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( lowercase_ , lowercase_ , lowercase_ , display_labels=lowercase_ , cmap='''Blues''' , normalize='''true''' , ) plt.title('''Normalized Confusion Matrix - IRIS Dataset''' ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
401
0
# Lazy-import structure for the SpeechT5 model family.
# NOTE(review): module/class names were normalized to `speecht5` / `SpeechT5*`
# so the TYPE_CHECKING imports match the declared import structure; the
# previous text used inconsistent `speechta` / `SpeechTa*` spellings and
# referenced `_import_structure` without ever defining it.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Base structure; tokenizer / modeling entries are appended below only when
# their optional dependencies (sentencepiece / torch) are installed.
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
181
"""Convert a length between metric units of the metre (m, km, Mm, ..., Ym)."""

# Full unit name -> SI symbol. Keys are the singular, lower-case spellings;
# the converter strips a trailing 's' and lower-cases its input before lookup.
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter) for each symbol: value_in_target =
# value * 10 ** (exponent[from] - exponent[to]).
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def __lowerCAmelCase(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` from `from_type` to `to_type`.

    Unit names may be given as full names ("kilometer", plural allowed) or as
    symbols already present in METRIC_CONVERSION.

    Raises:
        ValueError: if either unit is not a known metric length unit.
    """
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    # Map full names to symbols; unknown names fall through unchanged so the
    # membership checks below produce the error message.
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
251
0
"""Tests for the FileLock utility shipped with `datasets`."""

import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held."""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01

    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # The Timeout must not have fired before `timeout` seconds elapsed.
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """Overlong lock-file names are shortened to fit filesystem limits."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The lock file keeps its extension, but the oversized basename is
    # truncated/hashed so it stays within the usual 255-char filesystem limit.
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255

    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
712
"""Convert original ViT-MAE checkpoints into Hugging Face `ViTMAEForPreTraining` format."""

import argparse

import requests
import torch
from PIL import Image

from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor


def rename_key(name):
    """Map an original-checkpoint parameter name to its HF equivalent."""
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name


def convert_state_dict(orig_state_dict, config):
    """Rename all keys and split fused qkv projections into query/key/value."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # NOTE(review): q/k/v target key names reconstructed from the HF
            # ViTMAE module layout — confirm against the modeling code.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download, convert, sanity-check, and save a ViT-MAE checkpoint."""
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass (seeded: the model samples a random mask)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
36
0
'''simple docstring''' import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): _UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _UpperCamelCase = 128022 _UpperCamelCase = 128028 @require_sentencepiece class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" a_ =MaMaaaTokenizer a_ =False a_ =False a_ =True def _lowercase ( self : Optional[Any] ) -> Any: super().setUp() __lowerCamelCase : Tuple = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] __lowerCamelCase : Tuple = dict(zip(_a , range(len(_a ) ) ) ) __lowerCamelCase : int = Path(self.tmpdirname ) save_json(_a , save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_a , save_dir / VOCAB_FILES_NAMES['spm_file'] ) __lowerCamelCase : Optional[int] = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self : Optional[int] , **_a : Dict ) -> int: return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_a ) def _lowercase ( self : Union[str, Any] , _a : str ) -> Tuple: return ( "This is a test", "This is a test", ) def _lowercase ( self : List[Any] ) -> str: __lowerCamelCase : Optional[int] = '</s>' __lowerCamelCase : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) 
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def _lowercase ( self : Optional[Any] ) -> Optional[Any]: __lowerCamelCase : int = self.get_tokenizer() __lowerCamelCase : List[str] = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '</s>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '<s>' ) self.assertEqual(len(_a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('Skip this test while all models are still to be uploaded.' ) def _lowercase ( self : Any ) -> Optional[Any]: pass def _lowercase ( self : Union[str, Any] ) -> List[Any]: __lowerCamelCase : Optional[int] = self.get_tokenizer() __lowerCamelCase : List[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(_a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [2, 3, 4, 5, 6] , ) __lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(_a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) __lowerCamelCase : Dict = tokenizer.convert_tokens_to_string(_a ) self.assertEqual(_a , 'This is a test' ) @slow def _lowercase ( self : List[str] ) -> Any: # fmt: off __lowerCamelCase : List[str] = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 
9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , model_name='facebook/m2m100_418M' , revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' , ) @require_torch @require_sentencepiece 
@require_tokenizers class lowerCamelCase_ ( unittest.TestCase ): """simple docstring""" a_ ="""facebook/m2m100_418M""" a_ =[ """In my opinion, there are two levels of response from the French government.""", """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""", ] a_ =[ """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""", """L'affaire NSA souligne l'absence totale de débat sur le renseignement""", ] # fmt: off a_ =[EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2] @classmethod def _lowercase ( cls : Optional[Any] ) -> Tuple: __lowerCamelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='en' , tgt_lang='fr' ) __lowerCamelCase : Dict = 1 return cls def _lowercase ( self : Dict ) -> List[Any]: self.assertEqual(self.tokenizer.get_lang_id('ar' ) , 12_8006 ) self.assertEqual(self.tokenizer.get_lang_id('en' ) , 12_8022 ) self.assertEqual(self.tokenizer.get_lang_id('ro' ) , 12_8076 ) self.assertEqual(self.tokenizer.get_lang_id('mr' ) , 12_8063 ) def _lowercase ( self : Tuple ) -> str: __lowerCamelCase : Any = self.tokenizer.get_vocab() self.assertEqual(len(_a ) , self.tokenizer.vocab_size ) self.assertEqual(vocab['<unk>'] , 3 ) self.assertIn(self.tokenizer.get_lang_token('en' ) , _a ) def _lowercase ( self : Union[str, Any] ) -> int: __lowerCamelCase : List[str] = 'en' __lowerCamelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _a ) def _lowercase ( self : str ) -> List[str]: self.assertIn(_a , self.tokenizer.all_special_ids ) # fmt: off __lowerCamelCase : str = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on __lowerCamelCase : Any = self.tokenizer.decode(_a , skip_special_tokens=_a ) __lowerCamelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a ) 
self.assertEqual(_a , _a ) self.assertNotIn(self.tokenizer.eos_token , _a ) def _lowercase ( self : int ) -> List[Any]: __lowerCamelCase : Tuple = tempfile.mkdtemp() __lowerCamelCase : Optional[int] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_a ) __lowerCamelCase : Union[str, Any] = MaMaaaTokenizer.from_pretrained(_a ) self.assertDictEqual(new_tok.lang_token_to_id , _a ) @require_torch def _lowercase ( self : Optional[Any] ) -> List[str]: __lowerCamelCase : int = 'en' __lowerCamelCase : Dict = 'fr' __lowerCamelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors='pt' ) __lowerCamelCase : Dict = shift_tokens_right( batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: __lowerCamelCase : Optional[Any] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _lowercase ( self : Union[str, Any] ) -> int: __lowerCamelCase : List[Any] = 'mr' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) __lowerCamelCase : Optional[int] = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def _lowercase ( self : Any ) -> Dict: __lowerCamelCase : Union[str, Any] = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , 
[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) __lowerCamelCase : int = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _lowercase ( self : Union[str, Any] ) -> str: __lowerCamelCase : Optional[Any] = self.tokenizer._build_translation_inputs('A test' , return_tensors='pt' , src_lang='en' , tgt_lang='ar' ) self.assertEqual( nested_simplify(_a ) , { # en_XX, A, test, EOS 'input_ids': [[12_8022, 58, 4183, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 12_8006, } , )
459
"""Minimum cost of train tickets covering all travel days (cf. LeetCode 983)."""

import functools


def a_(days: list, costs: list) -> int:
    """Return the cheapest total price of 1-day/7-day/30-day passes.

    Args:
        days: strictly positive day numbers (1..365) on which travel happens.
        costs: exactly three integers — price of a 1-day, 7-day and 30-day pass.

    Returns:
        The minimum total cost; 0 for an empty `days` list.

    Raises:
        ValueError: on malformed `days`/`costs` or out-of-range day values.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)  # O(1) membership tests inside the recursion

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest cost to cover all travel days from `index` to 365.
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
459
1
"""Count the ways to assign N tasks to M persons with bitmask dynamic programming."""

from collections import defaultdict


class AssignmentUsingBitmask:
    """Each person must receive exactly one task they are able to perform."""

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table of dimension (2^M) * (N+1); -1 marks "not yet computed".
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # task number -> persons able to do it

        # final_mask has one bit set per person; reaching it means everyone
        # has been assigned a task.
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Ways to complete the assignment given persons in `mask` already have tasks."""
        # If all persons are assigned, this branch counts as one valid way.
        if mask == self.final_mask:
            return 1
        # If not everyone got a task and no more tasks are available, dead end.
        if task_no > self.total_tasks:
            return 0
        # Memoized?
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Ways when this task is assigned to nobody.
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # Try giving this task to each capable, still-unassigned person.
        if task_no in self.task:
            for p in self.task[task_no]:
                if mask & (1 << p):
                    continue  # person p already has a task
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # Save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Build the task->persons map and count complete assignments."""
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # Final answer is dp[0][1]: no person assigned, starting from task 1.
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
719
"""Generate all permutations of a list, recursively and by backtracking."""


def permute(nums: list) -> list:
    """Return all permutations of `nums` (non-empty list), recursively.

    Removes each element in turn, permutes the remainder, and appends the
    removed element to the end of every sub-permutation.
    """
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)  # restore `nums` for the next iteration
    return result


def permutea(nums):
    """Return all permutations of `nums` using in-place swap backtracking."""

    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
491
0
"""Compute the n-th Proth number: 3, 5, 9, 13, 17, 25, 33, ..."""

import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed).

    Raises:
        TypeError: if `number` is not an integer.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After [3, 5], Proth numbers come in doubling-sized "blocks":
        # block b contributes terms 2**(b+1) + previous terms.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3  # size of the first generated block
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
150
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class SCREAMING_SNAKE_CASE : snake_case__ : List[Any] = LEDConfig snake_case__ : int = {} snake_case__ : List[str] = 'gelu' def __init__( self : str , A__ : Tuple , A__ : Any=13 , A__ : Optional[int]=7 , A__ : str=True , A__ : Tuple=False , A__ : List[str]=99 , A__ : str=32 , A__ : Optional[Any]=2 , A__ : Optional[Any]=4 , A__ : Union[str, Any]=37 , A__ : Optional[int]=0.1 , A__ : str=0.1 , A__ : List[Any]=20 , A__ : List[Any]=2 , A__ : Optional[int]=1 , A__ : Optional[int]=0 , A__ : Tuple=4 , ): """simple docstring""" __lowerCamelCase : Optional[int] = parent __lowerCamelCase : List[Any] = batch_size __lowerCamelCase : Dict = seq_length __lowerCamelCase : List[str] = is_training __lowerCamelCase : Optional[Any] = use_labels __lowerCamelCase : Optional[int] = vocab_size __lowerCamelCase : List[str] = hidden_size __lowerCamelCase : Optional[Any] = num_hidden_layers __lowerCamelCase : str = num_attention_heads __lowerCamelCase : List[str] = intermediate_size __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : Tuple = attention_probs_dropout_prob __lowerCamelCase : Tuple = max_position_embeddings __lowerCamelCase : List[str] = eos_token_id __lowerCamelCase : Dict = pad_token_id __lowerCamelCase : Dict = bos_token_id __lowerCamelCase : Optional[int] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of 
shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after __lowerCamelCase : Optional[Any] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests __lowerCamelCase : int = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def a_ ( self : str ): """simple docstring""" __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __lowerCamelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __lowerCamelCase : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) __lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) __lowerCamelCase : Optional[Any] = prepare_led_inputs_dict(A__ , A__ , A__ ) __lowerCamelCase : Tuple = tf.concat( [tf.zeros_like(A__ )[:, :-1], tf.ones_like(A__ )[:, -1:]] , axis=-1 , ) __lowerCamelCase : Optional[Any] = global_attention_mask return config, inputs_dict def a_ ( self : Optional[int] , A__ : Optional[Any] , A__ : Dict ): 
"""simple docstring""" __lowerCamelCase : List[str] = TFLEDModel(config=A__ ).get_decoder() __lowerCamelCase : str = inputs_dict["""input_ids"""] __lowerCamelCase : List[str] = input_ids[:1, :] __lowerCamelCase : Any = inputs_dict["""attention_mask"""][:1, :] __lowerCamelCase : Dict = 1 # first forward pass __lowerCamelCase : str = model(A__ , attention_mask=A__ , use_cache=A__ ) __lowerCamelCase , __lowerCamelCase : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __lowerCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowerCamelCase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __lowerCamelCase : str = tf.concat([input_ids, next_tokens] , axis=-1 ) __lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __lowerCamelCase : Union[str, Any] = model(A__ , attention_mask=A__ )[0] __lowerCamelCase : Tuple = model(A__ , attention_mask=A__ , past_key_values=A__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __lowerCamelCase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx] __lowerCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A__ , A__ , rtol=1e-3 ) def __lowercase (_lowercase, _lowercase, _lowercase, _lowercase=None, _lowercase=None, _lowercase=None, _lowercase=None, ) -> Dict: """simple docstring""" if attention_mask is None: __lowerCamelCase : Optional[Any] = tf.cast(tf.math.not_equal(_lowercase, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: __lowerCamelCase : List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: 
__lowerCamelCase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowerCamelCase : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): snake_case__ : Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () snake_case__ : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else () snake_case__ : Optional[int] = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) snake_case__ : Dict = True snake_case__ : Any = False snake_case__ : str = False snake_case__ : Dict = False def a_ ( self : Tuple ): """simple docstring""" __lowerCamelCase : List[Any] = TFLEDModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=A__ ) def a_ ( self : int ): """simple docstring""" self.config_tester.run_common_tests() def a_ ( self : Any ): """simple docstring""" __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A__ ) def a_ ( self : int ): """simple docstring""" __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() __lowerCamelCase : Dict = tf.zeros_like(inputs_dict["""attention_mask"""] ) __lowerCamelCase : Dict = 2 __lowerCamelCase : List[Any] = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , 
inputs_dict["""global_attention_mask"""] , ) __lowerCamelCase : Tuple = True __lowerCamelCase : List[Any] = self.model_tester.seq_length __lowerCamelCase : Optional[int] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(A__ : Optional[int] ): __lowerCamelCase : List[str] = outputs.decoder_attentions self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(A__ : Optional[Any] ): __lowerCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_attentions] __lowerCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(A__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: __lowerCamelCase : Tuple = True __lowerCamelCase : Optional[int] = False __lowerCamelCase : Tuple = False __lowerCamelCase : Dict = model_class(A__ ) __lowerCamelCase : Optional[int] = model(self._prepare_for_class(A__ , A__ ) ) __lowerCamelCase : Optional[Any] = len(A__ ) self.assertEqual(config.output_hidden_states , A__ ) check_encoder_attentions_output(A__ ) if self.is_encoder_decoder: __lowerCamelCase : str = model_class(A__ ) __lowerCamelCase : Optional[int] = model(self._prepare_for_class(A__ , A__ ) ) self.assertEqual(config.output_hidden_states , A__ ) check_decoder_attentions_output(A__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : List[Any] = 
model_class(A__ ) __lowerCamelCase : List[Any] = model(self._prepare_for_class(A__ , A__ ) ) self.assertEqual(config.output_hidden_states , A__ ) check_encoder_attentions_output(A__ ) # Check attention is always last and order is fine __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : int = True __lowerCamelCase : Union[str, Any] = model_class(A__ ) __lowerCamelCase : Union[str, Any] = model(self._prepare_for_class(A__ , A__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(A__ ) ) self.assertEqual(model.config.output_hidden_states , A__ ) check_encoder_attentions_output(A__ ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def a_ ( self : Tuple ): """simple docstring""" pass def a_ ( self : Tuple ): """simple docstring""" pass def __lowercase (_lowercase ) -> Any: """simple docstring""" return tf.constant(_lowercase, dtype=tf.intaa ) UpperCAmelCase__ :List[str] = 1e-4 @slow @require_tf class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def a_ ( self : List[str] ): """simple docstring""" __lowerCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here __lowerCamelCase : List[str] = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) __lowerCamelCase : Dict = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) __lowerCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , A__ , A__ ) __lowerCamelCase : List[Any] = model(**A__ )[0] __lowerCamelCase : Union[str, Any] = (1, 1024, 768) self.assertEqual(output.shape , A__ ) # change to expected output here __lowerCamelCase : Optional[int] = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1e-3 ) def a_ ( self : Any ): """simple docstring""" __lowerCamelCase : Optional[Any] = 
TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here __lowerCamelCase : Tuple = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) __lowerCamelCase : Dict = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]] ) __lowerCamelCase : Union[str, Any] = prepare_led_inputs_dict(model.config , A__ , A__ ) __lowerCamelCase : Optional[int] = model(**A__ )[0] __lowerCamelCase : str = (1, 1024, model.config.vocab_size) self.assertEqual(output.shape , A__ ) # change to expected output here __lowerCamelCase : List[str] = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1e-3 , rtol=1e-3 )
150
1
def _A ( __magic_name__ ): if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(__magic_name__ , __magic_name__ ): raise TypeError("Input value must be a 'int' type" ) return bin(__magic_name__ ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
706
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in so the module still imports when vision deps are
        absent; tests that need real images are gated on @require_vision."""

        @staticmethod
        def open(*args, **kwargs):
            # No-op placeholder for PIL.Image.open.
            pass


def hashimage(image):
    """Return the MD5 hex digest of an image's raw bytes (stable fingerprint).

    The generated code called the non-existent ``hashlib.mda`` and the
    parameter/body names disagreed; both fixed.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowerCAmelCase(unittest.TestCase):
    # Mapping consumed by the shared pipeline test harness.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance plus example inputs for the harness."""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        """Exercise single-image and batched calls over several image modes."""
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
611
0
def SCREAMING_SNAKE_CASE(number: int) -> bool:
    """Return True if *number* is a power of two.

    Uses the classic ``n & (n - 1)`` trick: a power of two has exactly one
    set bit, so clearing the lowest set bit yields zero.  Note that 0 also
    satisfies the trick and returns True (behaviour kept from the original).

    Raises:
        ValueError: if *number* is negative.
    """
    # The original signature named the parameter ``snake_case`` while the
    # body read an undefined name ``number`` (NameError); the parameter is
    # renamed to match the body and the error message.
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
375
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

# The generated code bound the TypeVar to ``SCREAMING_SNAKE_CASE_`` while
# ``Generic[T]`` below referenced ``T`` (NameError); bind it as ``T`` and
# keep the old name as an alias for any external reference.
T = TypeVar("T")
SCREAMING_SNAKE_CASE_ = T


def get_parent_position(position: int) -> int:
    """Index of the parent of heap slot *position*."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Index of the left child of heap slot *position*."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Index of the right child of heap slot *position*."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Binary min-heap keyed by integer weight, with a position map so
    ``update_key`` (decrease/increase key) runs in O(log n).

    NOTE: the generated code named every method ``UpperCAmelCase`` and
    dropped the ``self.*`` attribute assignments; names are restored here
    to match the call sites visible in the original bodies.
    """

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []          # (element, weight) pairs
        self.position_map: dict[T, int] = {}         # element -> heap index
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        """Insert *elem* with *weight* and restore the heap invariant."""
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        """Pop and return the element with the smallest weight."""
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            root_elem, _ = self.heap[0]
            self._bubble_down(root_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        """Change the weight of *elem* and re-heapify in either direction."""
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Recursively swap with the parent while it is heavier.
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Recursively swap with the lighter child while the invariant fails.
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, nodea_pos: int, nodea_pos2: int) -> None:
        # Swap two heap slots and keep the position map consistent.
        node1_elem = self.heap[nodea_pos][0]
        node2_elem = self.heap[nodea_pos2][0]
        self.heap[nodea_pos], self.heap[nodea_pos2] = (
            self.heap[nodea_pos2],
            self.heap[nodea_pos],
        )
        self.position_map[node1_elem] = nodea_pos2
        self.position_map[node2_elem] = nodea_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency dict of dicts."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        """Register *node* if unseen."""
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given weight (nodes auto-created)."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def SCREAMING_SNAKE_CASE(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Prim's minimum-spanning-tree algorithm.

    Returns ``(dist, parent)`` where ``dist[v]`` is the weight of the edge
    connecting ``v`` to the tree and ``parent[v]`` its tree predecessor.
    """
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
375
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class lowerCAmelCase__(BaseImageProcessor):
    """BLIP-style image processor: optional RGB conversion, fixed-size resize,
    rescale to [0, 1] and CLIP-mean/std normalisation.

    NOTE(review): the generated code used ``_A`` for every parameter (a
    SyntaxError from duplicate argument names), referenced an undefined base
    class and undefined ``lowerCamelCase__`` locals; parameter, base-class
    and attribute names are restored from the values each obviously carries.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to the exact ``{"height", "width"}`` in ``size``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        do_convert_rgb: bool = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline over one image or a batch and return a
        ``BatchFeature`` with ``pixel_values``; per-call args override the
        instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        # Parenthesised: the generated code relied on `and` binding tighter
        # than `or`, which made the check fire whenever `resample` was None
        # even with do_resize disabled.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        encoded_outputs = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        return encoded_outputs
718
# Alphabet used by the cipher; the function bodies below reference it as
# LETTERS, so the generated name `snake_case` was a NameError.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactive entry point: read message, key and mode, print the result."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Vigenère-encrypt *message* with *key*."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Vigenère-decrypt *message* with *key*."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the matching *key* letter.

    Non-letters pass through unchanged and do not advance the key index;
    letter case is preserved.  (The generated code declared duplicate
    parameter names — a SyntaxError — and gave all four functions the same
    name; real names restored from the call sites.)
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
182
0
"""simple docstring""" import string import numpy def SCREAMING_SNAKE_CASE ( snake_case, snake_case): return b if a == 0 else greatest_common_divisor(b % a, snake_case) class _A : """simple docstring""" UpperCamelCase_ : List[Any] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) UpperCamelCase_ : Dict = numpy.vectorize(lambda _UpperCAmelCase : x % 3_6 ) UpperCamelCase_ : Union[str, Any] = numpy.vectorize(_UpperCAmelCase ) def __init__( self : Optional[int] , A_ : numpy.ndarray ) -> None: __snake_case = self.modulus(A_ ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key __snake_case = encrypt_key.shape[0] def lowercase ( self : Optional[int] , A_ : str ) -> int: return self.key_string.index(A_ ) def lowercase ( self : Any , A_ : int ) -> str: return self.key_string[round(A_ )] def lowercase ( self : Union[str, Any] ) -> None: __snake_case = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __snake_case = det % len(self.key_string ) __snake_case = len(self.key_string ) if greatest_common_divisor(A_ , len(self.key_string ) ) != 1: __snake_case = ( f"determinant modular {req_l} of encryption key({det}) " f"is not co prime w.r.t {req_l}.\nTry another key." 
) raise ValueError(A_ ) def lowercase ( self : Union[str, Any] , A_ : str ) -> str: __snake_case = [char for char in text.upper() if char in self.key_string] __snake_case = chars[-1] while len(A_ ) % self.break_key != 0: chars.append(A_ ) return "".join(A_ ) def lowercase ( self : Optional[Any] , A_ : str ) -> str: __snake_case = self.process_text(text.upper() ) __snake_case = '''''' for i in range(0 , len(A_ ) - self.break_key + 1 , self.break_key ): __snake_case = text[i : i + self.break_key] __snake_case = [self.replace_letters(A_ ) for char in batch] __snake_case = numpy.array([vec] ).T __snake_case = self.modulus(self.encrypt_key.dot(A_ ) ).T.tolist()[ 0 ] __snake_case = ''''''.join( self.replace_digits(A_ ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def lowercase ( self : Dict ) -> numpy.ndarray: __snake_case = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: __snake_case = det % len(self.key_string ) __snake_case = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: __snake_case = i break __snake_case = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(A_ ) ) def lowercase ( self : Tuple , A_ : str ) -> str: __snake_case = self.make_decrypt_key() __snake_case = self.process_text(text.upper() ) __snake_case = '''''' for i in range(0 , len(A_ ) - self.break_key + 1 , self.break_key ): __snake_case = text[i : i + self.break_key] __snake_case = [self.replace_letters(A_ ) for char in batch] __snake_case = numpy.array([vec] ).T __snake_case = self.modulus(decrypt_key.dot(A_ ) ).T.tolist()[0] __snake_case = ''''''.join( self.replace_digits(A_ ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def SCREAMING_SNAKE_CASE ( ): __snake_case = int(input('''Enter the order of the encryption key: ''')) __snake_case = [] print('''Enter each row of the encryption key with space separated integers''') for _ 
in range(snake_case): __snake_case = [int(snake_case) for x in input().split()] hill_matrix.append(snake_case) __snake_case = HillCipher(numpy.array(snake_case)) print('''Would you like to encrypt or decrypt some text? (1 or 2)''') __snake_case = input('''\n1. Encrypt\n2. Decrypt\n''') if option == "1": __snake_case = input('''What text would you like to encrypt?: ''') print('''Your encrypted text is:''') print(hc.encrypt(snake_case)) elif option == "2": __snake_case = input('''What text would you like to decrypt?: ''') print('''Your decrypted text is:''') print(hc.decrypt(snake_case)) if __name__ == "__main__": import doctest doctest.testmod() main()
564
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch __lowercase : Tuple = logging.get_logger(__name__) class _A ( _UpperCAmelCase ): """simple docstring""" UpperCamelCase_ : List[str] = ['''pixel_values'''] def __init__( self : Optional[int] , A_ : bool = True , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , **A_ : str , ) -> None: super().__init__(**A_ ) __snake_case = size if size is not None else {'''shortest_edge''': 256} __snake_case = get_size_dict(A_ , default_to_square=A_ ) __snake_case = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __snake_case = get_size_dict(A_ , param_name='''crop_size''' ) __snake_case = do_resize __snake_case = size __snake_case = resample __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowercase ( self : List[str] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, 
ChannelDimension]] = None , **A_ : List[Any] , ) -> np.ndarray: __snake_case = get_size_dict(A_ , default_to_square=A_ ) if "shortest_edge" not in size: raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" ) __snake_case = get_resize_output_image_size(A_ , size=size['''shortest_edge'''] , default_to_square=A_ ) return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ ) def lowercase ( self : Tuple , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Union[str, Any] , ) -> np.ndarray: __snake_case = get_size_dict(A_ ) if "height" not in size or "width" not in size: raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" ) return center_crop(A_ , size=(size['''height'''], size['''width''']) , data_format=A_ , **A_ ) def lowercase ( self : Optional[int] , A_ : np.ndarray , A_ : float , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : int ) -> np.ndarray: return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def lowercase ( self : Tuple , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray: return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ ) def lowercase ( self : List[Any] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A_ : Dict , ) -> Optional[Any]: __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = size if size is not None else self.size 
__snake_case = get_size_dict(A_ , default_to_square=A_ ) __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(A_ , param_name='''crop_size''' ) __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__snake_case = [to_numpy_array(A_ ) for image in images] if do_resize: __snake_case = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=A_ , size=A_ ) for image in images] if do_rescale: __snake_case = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_normalize: __snake_case = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images] __snake_case = [to_channel_dimension_format(A_ , A_ ) for image in images] __snake_case = {'''pixel_values''': images} return BatchFeature(data=A_ , tensor_type=A_ ) def lowercase ( self : List[str] , A_ : Optional[Any] , A_ : List[Tuple] = None ) -> List[Any]: __snake_case = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(A_ ) != len(A_ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(A_ ): __snake_case = target_sizes.numpy() __snake_case = [] for idx in range(len(A_ ) ): __snake_case = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=A_ ) __snake_case = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(A_ ) else: __snake_case = logits.argmax(dim=1 ) __snake_case = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
564
1
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def a_ ( ): UpperCAmelCase__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' ) UpperCAmelCase__ = parser.add_subparsers(help='diffusers-cli command helpers' ) # Register commands EnvironmentCommand.register_subcommand(lowerCamelCase ) # Let's go UpperCAmelCase__ = parser.parse_args() if not hasattr(lowerCamelCase , 'func' ): parser.print_help() exit(1 ) # Run UpperCAmelCase__ = args.func(lowerCamelCase ) service.run() if __name__ == "__main__": main()
632
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = SamImageProcessor() UpperCAmelCase__ = SamProcessor(lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : str ,**lowerCamelCase__ : Dict ): return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor def __lowerCAmelCase ( self : Optional[int] ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 ) UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Optional[int] ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = 
SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' ) UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) @require_torch def __lowerCAmelCase ( self : Dict ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = [torch.ones((1, 3, 5, 5) )] UpperCAmelCase__ = [[1_764, 2_646]] UpperCAmelCase__ = [[683, 1_024]] UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,torch.tensor(lowerCamelCase__ ) ) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) # should also work with np UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )] UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) UpperCAmelCase__ = [[1, 0], [0, 1]] with self.assertRaises(lowerCamelCase__ ): UpperCAmelCase__ = processor.post_process_masks(lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ) @require_vision @require_tf class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = SamImageProcessor() UpperCAmelCase__ = SamProcessor(lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self 
: str ,**lowerCamelCase__ : Union[str, Any] ): return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor def __lowerCAmelCase ( self : List[Any] ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : Any ): UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self : Optional[int] ): UpperCAmelCase__ = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ = self.get_image_processor(do_normalize=lowerCamelCase__ ,padding_value=1.0 ) UpperCAmelCase__ = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase__ ,padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,lowerCamelCase__ ) def __lowerCAmelCase ( self : Union[str, Any] ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='np' ) UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='np' ) input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) @require_tf def __lowerCAmelCase ( self : Tuple ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = [tf.ones((1, 3, 5, 5) )] UpperCAmelCase__ = [[1_764, 2_646]] UpperCAmelCase__ = [[683, 1_024]] UpperCAmelCase__ = 
processor.post_process_masks(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' ) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,tf.convert_to_tensor(lowerCamelCase__ ) ,tf.convert_to_tensor(lowerCamelCase__ ) ,return_tensors='tf' ,) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) # should also work with np UpperCAmelCase__ = [np.ones((1, 3, 5, 5) )] UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' ) self.assertEqual(masks[0].shape ,(1, 3, 1_764, 2_646) ) UpperCAmelCase__ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,np.array(lowerCamelCase__ ) ,np.array(lowerCamelCase__ ) ,return_tensors='tf' ) @require_vision @require_torchvision class snake_case ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self : Any ): UpperCAmelCase__ = tempfile.mkdtemp() UpperCAmelCase__ = SamImageProcessor() UpperCAmelCase__ = SamProcessor(lowerCamelCase__ ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self : Dict ,**lowerCamelCase__ : Any ): return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase__ ).image_processor def __lowerCAmelCase ( self : Optional[Any] ): shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self : List[str] ): UpperCAmelCase__ = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] UpperCAmelCase__ = [Image.fromarray(np.moveaxis(lowerCamelCase__ ,0 ,-1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def __lowerCAmelCase ( self : List[Any] ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa ) UpperCAmelCase__ = 
[tf.convert_to_tensor(lowerCamelCase__ )] UpperCAmelCase__ = [torch.tensor(lowerCamelCase__ )] UpperCAmelCase__ = [[1_764, 2_646]] UpperCAmelCase__ = [[683, 1_024]] UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='tf' ) UpperCAmelCase__ = processor.post_process_masks( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,return_tensors='pt' ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def __lowerCAmelCase ( self : Optional[Any] ): UpperCAmelCase__ = self.get_image_processor() UpperCAmelCase__ = SamProcessor(image_processor=lowerCamelCase__ ) UpperCAmelCase__ = self.prepare_image_inputs() UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy() UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='pt' )['pixel_values'].numpy() UpperCAmelCase__ = image_processor(lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy() UpperCAmelCase__ = processor(images=lowerCamelCase__ ,return_tensors='tf' )['pixel_values'].numpy() self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) ) self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ) )
632
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowercase : List[str] = logging.get_logger() @dataclass class __snake_case : _a : nn.Module _a : List[nn.Module]= field(default_factory=lowerCAmelCase ) _a : list= field(default_factory=lowerCAmelCase ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : int = len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self ,snake_case ): '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class __snake_case : _a : nn.Module _a : nn.Module _a : int= 0 _a : List= field(default_factory=lowerCAmelCase ) _a : List= field(default_factory=lowerCAmelCase ) def __call__( self ,snake_case ): '''simple docstring''' lowercase : Optional[int] = Tracker(self.dest )(snake_case ).parametrized lowercase : str = Tracker(self.src )(snake_case ).parametrized lowercase : int = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) lowercase : List[Any] = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( f"Numbers of operations are different. 
Source module has {len(snake_case )} operations while" f" destination module has {len(snake_case )}." ) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f"Transfered from={src_m} to={dest_m}" ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True ) -> List[Any]: print(f"Converting {name}..." ) with torch.no_grad(): lowercase : Union[str, Any] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ).eval() lowercase : Dict = ResNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval() lowercase : Any = ModuleTransfer(src=SCREAMING_SNAKE_CASE__ , dest=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = torch.randn((1, 3, 224, 224) ) module_transfer(SCREAMING_SNAKE_CASE__ ) assert torch.allclose(from_model(SCREAMING_SNAKE_CASE__ ) , our_model(SCREAMING_SNAKE_CASE__ ).logits ), "The model logits don't match the original one." lowercase : Dict = f"resnet{'-'.join(name.split('resnet' ) )}" print(SCREAMING_SNAKE_CASE__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , ) # we can use the convnext one lowercase : Optional[int] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE__ , ) print(f"Pushed {checkpoint_name}" ) def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True ) -> List[Any]: lowercase : List[Any] = """imagenet-1k-id2label.json""" lowercase : Tuple = 1_000 lowercase : str = (1, num_labels) lowercase : Optional[Any] = """huggingface/label-files""" lowercase : Dict = num_labels lowercase : int = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , 
SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) ) lowercase : Optional[int] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} lowercase : List[str] = idalabel lowercase : Any = {v: k for k, v in idalabel.items()} lowercase : Any = partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = { """resnet18""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ), """resnet26""": ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="""bottleneck""" ), """resnet34""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ), """resnet50""": ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="""bottleneck""" ), """resnet101""": ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="""bottleneck""" ), """resnet152""": ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1_024, 2_048] , layer_type="""bottleneck""" ), } if model_name: convert_weight_and_push(SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, expected_shape if __name__ == "__main__": lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. 
If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowercase : Tuple = parser.parse_args() lowercase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
336
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowercase : Dict = logging.get_logger(__name__) lowercase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} # See all BART models at https://huggingface.co/models?filter=bart lowercase : Optional[Any] = { """vocab_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""", }, """merges_file""": { """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""", }, """tokenizer_file""": { """facebook/bart-base""": 
"""https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""", """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""", """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""", """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""", """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""", """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""", }, } lowercase : Tuple = { """facebook/bart-base""": 1024, """facebook/bart-large""": 1024, """facebook/bart-large-mnli""": 1024, """facebook/bart-large-cnn""": 1024, """facebook/bart-large-xsum""": 1024, """yjernite/bart_eli5""": 1024, } class __snake_case ( lowerCAmelCase ): _a : Tuple= VOCAB_FILES_NAMES _a : Any= PRETRAINED_VOCAB_FILES_MAP _a : Union[str, Any]= PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : List[str]= ["input_ids", "attention_mask"] _a : List[Any]= BartTokenizer def __init__( self ,snake_case=None ,snake_case=None ,snake_case=None ,snake_case="replace" ,snake_case="<s>" ,snake_case="</s>" ,snake_case="</s>" ,snake_case="<s>" ,snake_case="<unk>" ,snake_case="<pad>" ,snake_case="<mask>" ,snake_case=False ,snake_case=True ,**snake_case ,): '''simple docstring''' super().__init__( snake_case ,snake_case ,tokenizer_file=snake_case ,errors=snake_case ,bos_token=snake_case ,eos_token=snake_case ,sep_token=snake_case ,cls_token=snake_case ,unk_token=snake_case ,pad_token=snake_case ,mask_token=snake_case ,add_prefix_space=snake_case ,trim_offsets=snake_case ,**snake_case ,) lowercase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space: lowercase : Union[str, Any] = getattr(snake_case ,pre_tok_state.pop("""type""" ) ) lowercase : Dict = 
add_prefix_space lowercase : List[str] = pre_tok_class(**snake_case ) lowercase : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase : int = """post_processor""" lowercase : List[Any] = getattr(self.backend_tokenizer ,snake_case ,snake_case ) if tokenizer_component_instance: lowercase : List[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Dict = tuple(state["""sep"""] ) if "cls" in state: lowercase : Dict = tuple(state["""cls"""] ) lowercase : Union[str, Any] = False if state.get("""add_prefix_space""" ,snake_case ) != add_prefix_space: lowercase : List[Any] = add_prefix_space lowercase : List[str] = True if state.get("""trim_offsets""" ,snake_case ) != trim_offsets: lowercase : Optional[int] = trim_offsets lowercase : Dict = True if changes_to_apply: lowercase : int = getattr(snake_case ,state.pop("""type""" ) ) lowercase : Any = component_class(**snake_case ) setattr(self.backend_tokenizer ,snake_case ,snake_case ) @property def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' lowercase : str = AddedToken(snake_case ,lstrip=snake_case ,rstrip=snake_case ) if isinstance(snake_case ,snake_case ) else value lowercase : Optional[Any] = value def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return 
super()._batch_encode_plus(*snake_case ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ,*snake_case ,**snake_case ): '''simple docstring''' lowercase : Union[str, Any] = kwargs.get("""is_split_into_words""" ,snake_case ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*snake_case ,**snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ): '''simple docstring''' lowercase : Union[str, Any] = self._tokenizer.model.save(snake_case ,name=snake_case ) return tuple(snake_case ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ): '''simple docstring''' lowercase : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ): '''simple docstring''' lowercase : Union[str, Any] = [self.sep_token_id] lowercase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
336
1
"""simple docstring""" def __lowercase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] ): SCREAMING_SNAKE_CASE__ = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def __lowercase ( lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] ): SCREAMING_SNAKE_CASE__ = 0 while b > 0: if b & 1: SCREAMING_SNAKE_CASE__ = ((res % c) + (a % c)) % c a += a b >>= 1 return res
701
"""simple docstring""" from math import sqrt def __lowercase ( lowerCamelCase_ : int ): SCREAMING_SNAKE_CASE__ = 0 for i in range(1 , int(sqrt(lowerCamelCase_ ) + 1 ) ): if n % i == 0 and i != sqrt(lowerCamelCase_ ): total += i + n // i elif i == sqrt(lowerCamelCase_ ): total += i return total - n def __lowercase ( lowerCamelCase_ : int = 10000 ): SCREAMING_SNAKE_CASE__ = sum( i for i in range(1 , lowerCamelCase_ ) if sum_of_divisors(sum_of_divisors(lowerCamelCase_ ) ) == i and sum_of_divisors(lowerCamelCase_ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
112
0
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def SCREAMING_SNAKE_CASE__ ( snake_case__ :Tuple ) -> Tuple: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4e_00 and cp <= 0x9f_ff) or (cp >= 0x34_00 and cp <= 0x4d_bf) # or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) # or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) # or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) # or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) # or (cp >= 0xf9_00 and cp <= 0xfa_ff) or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) # ): # return True return False def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> Tuple: # word like '180' or '身高' or '神' for char in word: _lowercase = ord(snake_case__ ) if not _is_chinese_char(snake_case__ ): return 0 return 1 def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] ) -> str: _lowercase = set() for token in tokens: _lowercase = len(snake_case__ ) > 1 and is_chinese(snake_case__ ) if chinese_word: word_set.add(snake_case__ ) _lowercase = list(snake_case__ ) return word_list def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :set() ) -> Dict: if not chinese_word_set: return bert_tokens _lowercase = max([len(snake_case__ ) for w in chinese_word_set] ) _lowercase = bert_tokens _lowercase , _lowercase = 0, len(snake_case__ ) while start < end: _lowercase = True if is_chinese(bert_word[start] ): _lowercase = min(end - start , snake_case__ ) for i in range(snake_case__ , 1 , -1 ): _lowercase = ''.join(bert_word[start : start + i] ) if 
whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _lowercase = '##' + bert_word[j] _lowercase = start + i _lowercase = False break if single_word: start += 1 return bert_word def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :LTP , snake_case__ :BertTokenizer ) -> Optional[Any]: _lowercase = [] for i in range(0 , len(snake_case__ ) , 100 ): _lowercase = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['cws'] ).cws _lowercase = [get_chinese_word(snake_case__ ) for r in res] ltp_res.extend(snake_case__ ) assert len(snake_case__ ) == len(snake_case__ ) _lowercase = [] for i in range(0 , len(snake_case__ ) , 100 ): _lowercase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=snake_case__ , truncation=snake_case__ , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(snake_case__ ) == len(snake_case__ ) _lowercase = [] for input_ids, chinese_word in zip(snake_case__ , snake_case__ ): _lowercase = [] for id in input_ids: _lowercase = bert_tokenizer._convert_id_to_token(snake_case__ ) input_tokens.append(snake_case__ ) _lowercase = add_sub_symbol(snake_case__ , snake_case__ ) _lowercase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(snake_case__ ): if token[:2] == "##": _lowercase = token[2:] # save chinese tokens' pos if len(snake_case__ ) == 1 and _is_chinese_char(ord(snake_case__ ) ): ref_id.append(snake_case__ ) ref_ids.append(snake_case__ ) assert len(snake_case__ ) == len(snake_case__ ) return ref_ids def SCREAMING_SNAKE_CASE__ ( snake_case__ :int ) -> Any: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , 'r' , encoding='utf-8' ) as f: _lowercase = f.readlines() _lowercase = [line.strip() for line in data if len(snake_case__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _lowercase = LTP(args.ltp ) # faster in GPU device _lowercase = BertTokenizer.from_pretrained(args.bert ) _lowercase = prepare_ref(snake_case__ , snake_case__ , snake_case__ ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: _lowercase = [json.dumps(snake_case__ ) + '\n' for ref in ref_ids] f.writelines(snake_case__ ) if __name__ == "__main__": snake_case = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) snake_case = parser.parse_args() main(args)
67
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    """Relax every edge leaving ``v`` for one direction of the bidirectional search.

    Updates ``cst_fwd``/``parent``/``queue`` in place for newly improved nodes and
    returns the (possibly improved) length of the best source->destination path
    discovered so far via a node already settled by the opposite search.

    NOTE: the original code named all nine parameters identically (a SyntaxError)
    and referenced names that no longer existed; this restores working names.
    """
    for nxt, d in graph[v]:
        # Nodes already settled in this direction are final.
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # If the opposite search has settled `nxt`, the two frontiers meet here.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """Bidirectional Dijkstra: shortest distance from *source* to *destination*.

    ``graph_forward`` maps node -> [[neighbour, weight], ...] along edge
    direction; ``graph_backward`` is the reversed graph. Returns -1 when no
    path exists.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1  # sentinel: no path found

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # Standard termination: once the two frontiers' settled costs exceed the
        # best meeting-point distance, no shorter path can be found.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
402
0
from maths.prime_check import is_prime def _UpperCamelCase ( lowercase__ ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Dict = F'''Input value of [number={number}] must be an integer''' raise TypeError(lowerCAmelCase__ ) if is_prime(lowerCAmelCase__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
706
import argparse
import glob
import logging
import os
import time
from argparse import Namespace

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset

from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors


# Module-level logger (variable name mangled by an automated rename).
__lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)


class _lowercase ( A__ ):
    """PyTorch-Lightning module that fine-tunes a transformer on a GLUE task.

    NOTE(review): this file has been mechanically renamed and is no longer
    runnable as-is: the base class `A__` is undefined (presumably
    `BaseTransformer`, imported above — confirm), several methods below share
    the name `__magic_name__` (later defs clobber earlier ones), and many
    assignment targets were collapsed to `__SCREAMING_SNAKE_CASE` while their
    bodies still reference the original local names.
    """

    # Mode tag passed to the base transformer; attribute name is mangled
    # (bodies below read `self.mode`, so this was presumably `mode` — confirm).
    SCREAMING_SNAKE_CASE__ : Dict = '''sequence-classification'''

    def __init__( self :int , lowerCAmelCase__ :Any ) -> Optional[Any]:
        """Build the module from an hparams Namespace (or dict)."""
        # Accept a plain dict of hyperparameters for convenience.
        if type(lowerCAmelCase__ ) == dict:
            __SCREAMING_SNAKE_CASE : Dict = Namespace(**lowerCAmelCase__ )
        # NOTE(review): `hparams` is undefined here after the renaming — the
        # assignments above/below lost their original targets.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = glue_output_modes[hparams.task]
        __SCREAMING_SNAKE_CASE : List[Any] = glue_tasks_num_labels[hparams.task]
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , self.mode )

    def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :Any ) -> Union[str, Any]:
        """Forward pass: delegate directly to the wrapped transformer model."""
        return self.model(**lowerCAmelCase__ )

    def __magic_name__( self :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
        """Training step: compute loss for one batch and log the current LR."""
        # Batch layout: [input_ids, attention_mask, token_type_ids, labels].
        __SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        # distilbert/bart take no token_type_ids; only bert/xlnet/albert use them.
        if self.config.model_type not in ["distilbert", "bart"]:
            __SCREAMING_SNAKE_CASE : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        __SCREAMING_SNAKE_CASE : List[str] = self(**lowerCAmelCase__ )
        # First output element is the loss when labels are supplied.
        __SCREAMING_SNAKE_CASE : Optional[Any] = outputs[0]

        __SCREAMING_SNAKE_CASE : str = self.trainer.lr_schedulers[0]['''scheduler''']
        __SCREAMING_SNAKE_CASE : Optional[int] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def __magic_name__( self :List[Any] ) -> List[Any]:
        """Prepare data: build and cache GLUE features for train and dev splits."""
        __SCREAMING_SNAKE_CASE : Tuple = self.hparams
        __SCREAMING_SNAKE_CASE : int = processors[args.task]()
        __SCREAMING_SNAKE_CASE : List[Any] = processor.get_labels()

        for mode in ["train", "dev"]:
            __SCREAMING_SNAKE_CASE : List[Any] = self._feature_file(lowerCAmelCase__ )
            # Reuse the cached feature file unless the user asked to overwrite it.
            if os.path.exists(lowerCAmelCase__ ) and not args.overwrite_cache:
                logger.info('''Loading features from cached file %s''' , lowerCAmelCase__ )
            else:
                logger.info('''Creating features from dataset file at %s''' , args.data_dir )
                __SCREAMING_SNAKE_CASE : int = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == '''dev'''
                    else processor.get_train_examples(args.data_dir )
                )
                __SCREAMING_SNAKE_CASE : Union[str, Any] = convert_examples_to_features(
                    lowerCAmelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info('''Saving features into cached file %s''' , lowerCAmelCase__ )
                torch.save(lowerCAmelCase__ , lowerCAmelCase__ )

    def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> DataLoader:
        """Build a DataLoader over the cached features for the given split."""
        # "test" reuses the dev feature file.
        __SCREAMING_SNAKE_CASE : Dict = '''dev''' if mode == '''test''' else mode

        __SCREAMING_SNAKE_CASE : Any = self._feature_file(lowerCAmelCase__ )
        logger.info('''Loading features from cached file %s''' , lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : int = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        __SCREAMING_SNAKE_CASE : Dict = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        __SCREAMING_SNAKE_CASE : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        # Label dtype depends on the task head: long for classification,
        # float for regression (e.g. STS-B).
        if self.hparams.glue_output_mode == "classification":
            __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([f.label for f in features] , dtype=torch.float )

        return DataLoader(
            TensorDataset(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )

    def __magic_name__( self :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Any:
        """Validation step: return loss plus raw predictions and targets."""
        __SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            __SCREAMING_SNAKE_CASE : Dict = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None

        __SCREAMING_SNAKE_CASE : Dict = self(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = outputs[:2]
        __SCREAMING_SNAKE_CASE : int = logits.detach().cpu().numpy()
        __SCREAMING_SNAKE_CASE : int = inputs['''labels'''].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __magic_name__( self :List[str] , lowerCAmelCase__ :List[str] ) -> tuple:
        """Aggregate per-batch eval outputs into metrics + prediction lists."""
        __SCREAMING_SNAKE_CASE : Dict = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
        __SCREAMING_SNAKE_CASE : int = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )

        # argmax for classification logits; squeeze the single regression column.
        if self.hparams.glue_output_mode == "classification":
            __SCREAMING_SNAKE_CASE : str = np.argmax(lowerCAmelCase__ , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            __SCREAMING_SNAKE_CASE : List[Any] = np.squeeze(lowerCAmelCase__ )

        __SCREAMING_SNAKE_CASE : Any = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
        __SCREAMING_SNAKE_CASE : Any = [[] for _ in range(out_label_ids.shape[0] )]
        __SCREAMING_SNAKE_CASE : Any = [[] for _ in range(out_label_ids.shape[0] )]

        __SCREAMING_SNAKE_CASE : Optional[int] = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , lowerCAmelCase__ , lowerCAmelCase__ )}
        __SCREAMING_SNAKE_CASE : Optional[int] = dict(results.items() )
        __SCREAMING_SNAKE_CASE : str = results
        return ret, preds_list, out_label_list

    def __magic_name__( self :Dict , lowerCAmelCase__ :list ) -> dict:
        """Validation epoch end: expose loss and metrics for loggers."""
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = self._eval_end(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : str = ret['''log''']
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __magic_name__( self :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> dict:
        """Test epoch end: same aggregation as validation, relabelled."""
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = self._eval_end(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Optional[int] = ret['''log''']
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __magic_name__( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> int:
        """Add GLUE-specific CLI arguments on top of the base transformer args."""
        BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
        parser.add_argument(
            '''--max_seq_length''' , default=128 , type=lowerCAmelCase__ , help=(
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--task''' , default='''''' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The GLUE task to run''' , )
        parser.add_argument(
            '''--gpus''' , default=0 , type=lowerCAmelCase__ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
        parser.add_argument(
            '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
        return parser


def _UpperCamelCase ( ):
    """CLI entry point: parse args, train, and optionally run prediction.

    NOTE(review): references `GLUETransformer`, which is undefined after the
    renaming (presumably the class above), and the __main__ guard below calls
    `main()` although this function is named `_UpperCamelCase` — both are
    artifacts of the automated rename.
    """
    __SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
    add_generic_args(lowercase__ , os.getcwd() )
    __SCREAMING_SNAKE_CASE : List[str] = GLUETransformer.add_model_specific_args(lowercase__ , os.getcwd() )
    __SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        __SCREAMING_SNAKE_CASE : Dict = os.path.join(
            '''./results''' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
        os.makedirs(args.output_dir )

    __SCREAMING_SNAKE_CASE : Optional[Any] = GLUETransformer(lowercase__ )
    __SCREAMING_SNAKE_CASE : int = generic_train(lowercase__ , lowercase__ )

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        __SCREAMING_SNAKE_CASE : str = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=lowercase__ ) )
        __SCREAMING_SNAKE_CASE : Tuple = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(lowercase__ )


if __name__ == "__main__":
    main()
260
0
import os import pytest from transformers.dynamic_module_utils import get_imports SCREAMING_SNAKE_CASE_:Dict = """ import os """ SCREAMING_SNAKE_CASE_:Any = """ def foo(): import os return False """ SCREAMING_SNAKE_CASE_:str = """ def foo(): def bar(): if True: import os return False return bar() """ SCREAMING_SNAKE_CASE_:str = """ import os try: import bar except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE_:Union[str, Any] = """ import os def foo(): try: import bar except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE_:Any = """ import os try: import bar except (ImportError, AttributeError): raise ValueError() """ SCREAMING_SNAKE_CASE_:Tuple = """ import os try: import bar except ImportError as e: raise ValueError() """ SCREAMING_SNAKE_CASE_:Tuple = """ import os try: import bar except: raise ValueError() """ SCREAMING_SNAKE_CASE_:Optional[int] = """ import os try: import bar import baz except ImportError: raise ValueError() """ SCREAMING_SNAKE_CASE_:List[str] = """ import os try: import bar import baz except ImportError: x = 1 raise ValueError() """ SCREAMING_SNAKE_CASE_:List[str] = [ TOP_LEVEL_IMPORT, IMPORT_IN_FUNCTION, DEEPLY_NESTED_IMPORT, TOP_LEVEL_TRY_IMPORT, GENERIC_EXCEPT_IMPORT, MULTILINE_TRY_IMPORT, MULTILINE_BOTH_IMPORT, MULTIPLE_EXCEPTS_IMPORT, EXCEPT_AS_IMPORT, TRY_IMPORT_IN_FUNCTION, ] @pytest.mark.parametrize("""case""" , _lowerCAmelCase ) def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: """simple docstring""" A : Optional[Any] = os.path.join(_lowerCAmelCase , """test_file.py""" ) with open(_lowerCAmelCase , """w""" ) as _tmp_file: _tmp_file.write(_lowerCAmelCase ) A : List[Any] = get_imports(_lowerCAmelCase ) assert parsed_imports == ["os"]
662
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, lowerCamelCase__, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = None, lowerCamelCase__ = False, lowerCamelCase__ = False, lowerCamelCase__ = None, **lowerCamelCase__, ): super().__init__( lowerCamelCase__, split=lowerCamelCase__, features=lowerCamelCase__, cache_dir=lowerCamelCase__, keep_in_memory=lowerCamelCase__, streaming=lowerCamelCase__, num_proc=lowerCamelCase__, **lowerCamelCase__, ) A : List[Any] = path_or_paths if isinstance(lowerCamelCase__, lowerCamelCase__ ) else {self.split: path_or_paths} A : str = Text( cache_dir=lowerCamelCase__, data_files=lowerCamelCase__, features=lowerCamelCase__, **lowerCamelCase__, ) def _lowerCAmelCase ( self ): # Build iterable dataset if self.streaming: A : int = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A : List[str] = None A : Dict = None A : Tuple = None A : Tuple = None self.builder.download_and_prepare( download_config=lowerCamelCase__, download_mode=lowerCamelCase__, verification_mode=lowerCamelCase__, base_path=lowerCamelCase__, num_proc=self.num_proc, ) A : List[str] = self.builder.as_dataset( split=self.split, verification_mode=lowerCamelCase__, in_memory=self.keep_in_memory ) return dataset
662
1
import inspect
import unittest

from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class SCREAMING_SNAKE_CASE__ :
    """Helper that builds small RegNet configs/inputs for the unit tests below.

    NOTE(review): an automated rename broke this module — all methods of this
    class share the name `A__` (later defs clobber earlier ones), all
    constructor parameters share one name (a SyntaxError), and assignment
    targets were collapsed to `__UpperCamelCase` while bodies still read the
    original attribute names (e.g. `self.batch_size`).
    """

    def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , )-> Union[str, Any]:
        """Store the test hyperparameters (RHS names are the intended fields)."""
        __UpperCamelCase = parent
        __UpperCamelCase = batch_size
        __UpperCamelCase = image_size
        __UpperCamelCase = num_channels
        __UpperCamelCase = embeddings_size
        __UpperCamelCase = hidden_sizes
        __UpperCamelCase = depths
        __UpperCamelCase = is_training
        __UpperCamelCase = use_labels
        __UpperCamelCase = hidden_act
        __UpperCamelCase = num_labels
        __UpperCamelCase = scope
        # number of stages == number of depth entries
        __UpperCamelCase = len(SCREAMING_SNAKE_CASE_ )

    def A__ ( self )-> Tuple:
        """Create (config, pixel_values, labels) for one forward pass."""
        __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __UpperCamelCase = None
        if self.use_labels:
            __UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )

        __UpperCamelCase = self.get_config()
        return config, pixel_values, labels

    def A__ ( self )-> Tuple:
        """Build a RegNetConfig from the stored hyperparameters."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]:
        """Check the backbone's last hidden state shape."""
        __UpperCamelCase = RegNetModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple:
        """Check the classification head's logits shape."""
        __UpperCamelCase = self.num_labels
        __UpperCamelCase = RegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A__ ( self )-> Tuple:
        """Return (config, inputs_dict) for the common-mixin tests."""
        __UpperCamelCase = self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
        __UpperCamelCase = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Model tests for RegNet.

    NOTE(review): the base mixins `SCREAMING_SNAKE_CASE_` are undefined
    (presumably ModelTesterMixin and PipelineTesterMixin, imported above), and
    every class attribute below shares the name `_snake_case`, so later
    assignments clobber earlier ones — rename artifacts.
    """

    _snake_case = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    _snake_case = (
        {'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    _snake_case = False
    _snake_case = False
    _snake_case = False
    _snake_case = False

    def A__ ( self )-> Union[str, Any]:
        """setUp: build the model tester and config tester.

        NOTE(review): `RegNetModelTester` is undefined after the renaming —
        presumably the first class in this module.
        """
        __UpperCamelCase = RegNetModelTester(self )
        __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )

    def A__ ( self )-> Optional[int]:
        """Run the standard config round-trip checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A__ ( self )-> List[Any]:
        # Intentionally empty: no extra common-property checks for RegNet.
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def A__ ( self )-> Union[str, Any]:
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def A__ ( self )-> Any:
        pass

    def A__ ( self )-> List[str]:
        """Check forward() takes `pixel_values` as its first argument."""
        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
            __UpperCamelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __UpperCamelCase = [*signature.parameters.keys()]

            __UpperCamelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )

    def A__ ( self )-> Optional[Any]:
        """Smoke-test the backbone forward pass."""
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def A__ ( self )-> Any:
        """Check that norm layers initialise to weight=1 / bias=0.

        NOTE(review): `nn.BatchNormad` is a garbled name (presumably
        `nn.BatchNorm2d`) — this isinstance check would raise AttributeError.
        """
        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ )
            for name, module in model.named_modules():
                if isinstance(SCREAMING_SNAKE_CASE_ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )

    def A__ ( self )-> int:
        """Check hidden-state outputs: one per stage plus the embedding output."""

        def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
            __UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ )
            model.to(SCREAMING_SNAKE_CASE_ )
            model.eval()

            with torch.no_grad():
                __UpperCamelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )

            __UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            __UpperCamelCase = self.model_tester.num_stages
            self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()

        __UpperCamelCase = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                __UpperCamelCase = layer_type
                __UpperCamelCase = True
                check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                __UpperCamelCase = True

                check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def A__ ( self )-> str:
        """Smoke-test the classification head."""
        __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )

    @slow
    def A__ ( self )-> Any:
        """Load a pretrained checkpoint end-to-end (network access; slow)."""
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCamelCase = RegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )


def A_ ( ) -> Union[str, Any]:
    """Load the standard COCO fixture image used by the integration test."""
    __UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run a pretrained RegNet classifier on a real image."""

    @cached_property
    def A__ ( self )-> Optional[int]:
        """Image processor for the first pretrained checkpoint (None w/o vision)."""
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def A__ ( self )-> Union[str, Any]:
        """Verify the logits shape and a slice of known expected values."""
        __UpperCamelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE_ )

        __UpperCamelCase = self.default_image_processor
        __UpperCamelCase = prepare_img()
        __UpperCamelCase = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ )

        # forward pass
        with torch.no_grad():
            __UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )

        # verify the logits
        __UpperCamelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )

        __UpperCamelCase = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(SCREAMING_SNAKE_CASE_ )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
451
import math
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput


class SCREAMING_SNAKE_CASE__ ( SchedulerMixin, ConfigMixin ):
    """Fourth-order Improved Pseudo Numerical Methods (iPNDM) diffusion scheduler.

    Restored from a mechanically-renamed original: the bases were undefined
    names (the only otherwise-unused imports are SchedulerMixin/ConfigMixin),
    every method was named ``A__`` although bodies called ``self.set_timesteps``
    and ``self._get_prev_sample`` by their real names, and ``torch.atana`` /
    ``torch.floataa`` were garbled spellings of ``torch.atan2`` /
    ``torch.float32`` (nonexistent attributes → AttributeError).
    """

    # Solver order attribute (name kept from the original file).
    _snake_case = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
    ) -> None:
        """Create the scheduler and build the default timestep schedule."""
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values: history of recent combined model outputs used by the
        # Adams-Bashforth-style multistep formulas in `step`.
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Set the discrete timesteps used for the diffusion chain.

        Args:
            num_inference_steps: number of denoising steps.
            device: device to move the timesteps tensor to.
        """
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            # sin^2 schedule; alphas are chosen so that alpha^2 + beta^2 == 1.
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        # Reset multistep history whenever the schedule changes.
        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Propagate the sample one step backwards through the diffusion process.

        Raises:
            ValueError: if `set_timesteps` has not been called.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # Linear multistep coefficients: orders 1-3 warm up, then 4th order.
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        """Return the sample unchanged; iPNDM needs no input scaling."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        """Compute x_{t-1} from the current sample and the combined output `ets`."""
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        # Predicted clean sample; clamp the divisor to avoid division by zero
        # at the final step where alpha -> 0.
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self) -> int:
        return self.config.num_train_timesteps
451
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __lowerCAmelCase : str =2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __lowerCAmelCase : Dict =5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __lowerCAmelCase : List[str] =0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> tuple[str, float]: '''simple docstring''' lowercase = len([g for position, g in enumerate(lowerCAmelCase__ ) if g == main_target[position]] ) return (item, float(lowerCAmelCase__ )) def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> tuple[str, str]: '''simple docstring''' lowercase = random.randint(0 , len(lowerCAmelCase__ ) - 1 ) lowercase = parent_a[:random_slice] + parent_a[random_slice:] lowercase = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :list[str] ) -> str: '''simple docstring''' lowercase = list(lowerCAmelCase__ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowercase = random.choice(lowerCAmelCase__ ) return "".join(lowerCAmelCase__ ) def UpperCAmelCase__ ( lowerCAmelCase__ :tuple[str, float] , lowerCAmelCase__ :list[tuple[str, float]] , lowerCAmelCase__ :list[str] , ) -> list[str]: '''simple docstring''' lowercase = [] # Generate more children proportionally to the fitness score. 
lowercase = int(parent_a[1] * 1_0_0 ) + 1 lowercase = 1_0 if child_n >= 1_0 else child_n for _ in range(lowerCAmelCase__ ): lowercase = population_score[random.randint(0 , lowerCAmelCase__ )][0] lowercase , lowercase = crossover(parent_a[0] , lowerCAmelCase__ ) # Append new string to the population list. pop.append(mutate(lowerCAmelCase__ , lowerCAmelCase__ ) ) pop.append(mutate(lowerCAmelCase__ , lowerCAmelCase__ ) ) return pop def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :list[str] , lowerCAmelCase__ :bool = True ) -> tuple[int, int, str]: '''simple docstring''' if N_POPULATION < N_SELECTED: lowercase = f'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(lowerCAmelCase__ ) # Verify that the target contains no genes besides the ones inside genes variable. lowercase = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowercase = f'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(lowerCAmelCase__ ) # Generate random starting population. lowercase = [] for _ in range(lowerCAmelCase__ ): population.append("""""".join([random.choice(lowerCAmelCase__ ) for i in range(len(lowerCAmelCase__ ) )] ) ) # Just some logs to know what the algorithms is doing. lowercase , lowercase = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(lowerCAmelCase__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. 
# We just need to call evaluate for every item inside the population. lowercase = [evaluate(lowerCAmelCase__ , lowerCAmelCase__ ) for item in population] # Check if there is a matching evolution. lowercase = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : x[1] , reverse=lowerCAmelCase__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 1_0 == 0: print( f'\nGeneration: {generation}' f'\nTotal Population:{total_population}' f'\nBest score: {population_score[0][1]}' f'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowercase = population[: int(N_POPULATION / 3 )] population.clear() population.extend(lowerCAmelCase__ ) # Normalize population score to be between 0 and 1. lowercase = [ (item, score / len(lowerCAmelCase__ )) for item, score in population_score ] # This is selection for i in range(lowerCAmelCase__ ): population.extend(select(population_score[int(lowerCAmelCase__ )] , lowerCAmelCase__ , lowerCAmelCase__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(lowerCAmelCase__ ) > N_POPULATION: break if __name__ == "__main__": __lowerCAmelCase : Any =( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) __lowerCAmelCase : Optional[Any] =list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any =basic(target_str, genes_list) print( F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
359
"""simple docstring""" import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 __lowerCAmelCase : str =get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") __lowerCAmelCase : Tuple =get_tests_dir("""fixtures/vocab.json""") __lowerCAmelCase : Tuple =get_tests_dir("""fixtures""") class _A ( unittest.TestCase ): snake_case__ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] def A__ ( self ): """simple docstring""" lowercase = 0 def A__ ( self ): """simple docstring""" lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig() lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder model_config.save_pretrained(__lowerCAmelCase ) 
processor.save_pretrained(__lowerCAmelCase ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , """vocab.json""" ) ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase ) # save in new folder processor.save_pretrained(__lowerCAmelCase ) # drop `processor_class` in tokenizer with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) as f: lowercase = json.load(__lowerCAmelCase ) config_dict.pop("""processor_class""" ) with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f: f.write(json.dumps(__lowerCAmelCase ) ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaFeatureExtractor() lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) lowercase = WavaVecaProcessor(__lowerCAmelCase , __lowerCAmelCase ) # save in new folder processor.save_pretrained(__lowerCAmelCase ) # drop `processor_class` in feature extractor with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """r""" ) as f: lowercase = json.load(__lowerCAmelCase ) config_dict.pop("""processor_class""" ) with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f: 
f.write(json.dumps(__lowerCAmelCase ) ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(__lowerCAmelCase ) # copy relevant files copyfile(__lowerCAmelCase , os.path.join(__lowerCAmelCase , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" ) as f: f.write("""{}""" ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def A__ ( self ): """simple docstring""" with self.assertRaises(__lowerCAmelCase ): lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCAmelCase ): lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase ) lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) lowercase = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) lowercase = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase , use_fast=__lowerCAmelCase ) lowercase = new_processor.tokenizer 
self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def A__ ( self ): """simple docstring""" try: AutoConfig.register("""custom""" , __lowerCAmelCase ) AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API lowercase = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(__lowerCAmelCase , """vocab.txt""" ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(__lowerCAmelCase ) lowercase = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(__lowerCAmelCase ) lowercase = AutoProcessor.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def A__ ( self ): """simple docstring""" class _A ( lowerCAmelCase ): snake_case__ : Optional[Any] = False class 
_A ( lowerCAmelCase ): snake_case__ : int = False class _A ( lowerCAmelCase ): snake_case__ : List[Any] = 'AutoFeatureExtractor' snake_case__ : List[str] = 'AutoTokenizer' snake_case__ : List[str] = False try: AutoConfig.register("""custom""" , __lowerCAmelCase ) AutoFeatureExtractor.register(__lowerCAmelCase , __lowerCAmelCase ) AutoTokenizer.register(__lowerCAmelCase , slow_tokenizer_class=__lowerCAmelCase ) AutoProcessor.register(__lowerCAmelCase , __lowerCAmelCase ) # If remote code is not set, the default is to use local classes. lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
lowercase = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__lowerCAmelCase ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def A__ ( self ): """simple docstring""" lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def A__ ( self ): """simple docstring""" lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class _A ( unittest.TestCase ): snake_case__ : Dict = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def A__ ( cls ): """simple docstring""" lowercase = TOKEN HfFolder.save_token(__lowerCAmelCase ) @classmethod def A__ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def A__ ( self ): """simple docstring""" lowercase = WavaVecaProcessor.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( 
os.path.join(__lowerCAmelCase , """test-processor""" ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token ) lowercase = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def A__ ( self ): """simple docstring""" lowercase = WavaVecaProcessor.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCAmelCase , """test-processor-org""" ) , push_to_hub=__lowerCAmelCase , use_auth_token=self._token , organization="""valid_org""" , ) lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCAmelCase , getattr(new_processor.feature_extractor , __lowerCAmelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def A__ ( self ): """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() lowercase = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase = os.path.join(__lowerCAmelCase , """vocab.txt""" ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) lowercase = CustomTokenizer(__lowerCAmelCase ) lowercase = CustomProcessor(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'{USER}/test-dynamic-processor' , token=self._token ) lowercase = Repository(__lowerCAmelCase , clone_from=f'{USER}/test-dynamic-processor' , token=self._token ) processor.save_pretrained(__lowerCAmelCase ) # 
This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(__lowerCAmelCase , """tokenizer_config.json""" ) ) as f: lowercase = json.load(__lowerCAmelCase ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCAmelCase , """custom_processing.py""" ) ) ) repo.push_to_hub() lowercase = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor' , trust_remote_code=__lowerCAmelCase ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
359
1
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration A_ : Optional[Any] = pytest.mark.integration A_ : Union[str, Any] = {"comet"} A_ : str = importlib.util.find_spec("fairseq") is not None A_ : Any = {"code_eval"} A_ : Tuple = os.name == "nt" A_ : List[Any] = {"bertscore", "frugalscore", "perplexity"} A_ : Any = importlib.util.find_spec("transformers") is not None def A ( snake_case__ ): '''simple docstring''' @wraps(snake_case__ ) def wrapper(self , snake_case__ ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("""\"test requires Fairseq\"""" ) else: test_case(self , snake_case__ ) return wrapper def A ( snake_case__ ): '''simple docstring''' @wraps(snake_case__ ) def wrapper(self , snake_case__ ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("""\"test requires transformers\"""" ) else: test_case(self , snake_case__ ) return wrapper def A ( snake_case__ ): '''simple docstring''' @wraps(snake_case__ ) def wrapper(self , snake_case__ ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("""\"test not supported on Windows\"""" ) else: test_case(self , snake_case__ ) return wrapper def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("""./metrics/*/""" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( A__ ,A__ ,A__ ) @local class lowerCamelCase (parameterized.TestCase ): lowerCamelCase__ : int = {} lowerCamelCase__ : Dict = None 
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" ) @pytest.mark.filterwarnings("""ignore:load_metric is deprecated:FutureWarning""" ) def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : str ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = """[...]""" SCREAMING_SNAKE_CASE__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __UpperCAmelCase ) ).module_path ) SCREAMING_SNAKE_CASE__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__UpperCAmelCase ) # check parameters SCREAMING_SNAKE_CASE__ = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(__UpperCAmelCase , metric_module.__name__ ): with self.use_local_metrics(): try: SCREAMING_SNAKE_CASE__ = doctest.testmod(__UpperCAmelCase , verbose=__UpperCAmelCase , raise_on_error=__UpperCAmelCase ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Optional[Any] ) -> str: SCREAMING_SNAKE_CASE__ = """[...]""" SCREAMING_SNAKE_CASE__ = importlib.import_module( datasets.load.metric_module_factory(os.path.join("""metrics""" , __UpperCAmelCase ) ).module_path ) # run doctest with self.use_local_metrics(): SCREAMING_SNAKE_CASE__ = doctest.testmod(__UpperCAmelCase , verbose=__UpperCAmelCase , raise_on_error=__UpperCAmelCase ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] ) -> str: if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](__UpperCAmelCase ): yield else: yield @contextmanager def 
SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: def load_local_metric(__UpperCAmelCase : int , *__UpperCAmelCase : int , **__UpperCAmelCase : Optional[int] ): return load_metric(os.path.join("""metrics""" , __UpperCAmelCase ) , *__UpperCAmelCase , **__UpperCAmelCase ) with patch("""datasets.load_metric""" ) as mock_load_metric: SCREAMING_SNAKE_CASE__ = load_local_metric yield @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict , __UpperCAmelCase : int ) -> Optional[Any]: def wrapper(__UpperCAmelCase : List[str] ): SCREAMING_SNAKE_CASE__ = contextmanager(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("""bleurt""" ) def A ( snake_case__ ): '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags class lowerCamelCase (A__ ): def SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Union[str, Any] ) -> List[str]: assert len(input_dict["""input_ids"""] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor: SCREAMING_SNAKE_CASE__ = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("""bertscore""" ) def A ( snake_case__ ): '''simple docstring''' import torch def bert_cos_score_idf(snake_case__ , snake_case__ , *snake_case__ , **snake_case__ ): return torch.tensor([[1.0, 1.0, 1.0]] * len(snake_case__ ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("""bert_score.scorer.get_model""" ), patch( """bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf: SCREAMING_SNAKE_CASE__ = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("""comet""" ) def A ( 
snake_case__ ): '''simple docstring''' def load_from_checkpoint(snake_case__ ): class lowerCamelCase : def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Any , *__UpperCAmelCase : str , **__UpperCAmelCase : List[Any] ) -> Optional[int]: assert len(__UpperCAmelCase ) == 2 SCREAMING_SNAKE_CASE__ = [0.19, 0.92] return scores, sum(__UpperCAmelCase ) / len(__UpperCAmelCase ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("""comet.download_model""" ) as mock_download_model: SCREAMING_SNAKE_CASE__ = None with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint: SCREAMING_SNAKE_CASE__ = load_from_checkpoint yield def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = load_metric(os.path.join("""metrics""" , """seqeval""" ) ) SCREAMING_SNAKE_CASE__ = """ERROR""" SCREAMING_SNAKE_CASE__ = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(snake_case__ , match=re.escape(snake_case__ ) ): metric.compute(predictions=[] , references=[] , scheme=snake_case__ )
616
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # 
noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
616
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" snake_case = 42 class __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" @register_to_config def __init__( self : List[str] , _snake_case : int = 65_536 , _snake_case : Optional[int] = None , _snake_case : Optional[int] = 2 , _snake_case : Optional[Any] = 2 , _snake_case : Dict = 0 , _snake_case : Dict = "fourier" , _snake_case : Any = True , _snake_case : Optional[Any] = False , _snake_case : Tuple = 0.0 , _snake_case : int = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , _snake_case : Optional[Any] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , _snake_case : int = "UNetMidBlock1D" , _snake_case : Any = None , _snake_case : Union[str, Any] = (32, 32, 64) , _snake_case : Any = None , _snake_case : Optional[Any] = 8 , _snake_case : List[Any] = 1 , _snake_case : Optional[int] = False , ) -> List[str]: """simple docstring""" super().__init__() A_ = sample_size # time if time_embedding_type == "fourier": A_ = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=snake_case__ , log=snake_case__ , flip_sin_to_cos=snake_case__ ) A_ = 2 * block_out_channels[0] elif time_embedding_type == "positional": A_ = Timesteps( block_out_channels[0] , flip_sin_to_cos=snake_case__ , downscale_freq_shift=snake_case__ ) A_ = block_out_channels[0] if use_timestep_embedding: A_ = block_out_channels[0] * 4 A_ = TimestepEmbedding( in_channels=snake_case__ , time_embed_dim=snake_case__ , act_fn=snake_case__ , 
out_dim=block_out_channels[0] , ) A_ = nn.ModuleList([] ) A_ = None A_ = nn.ModuleList([] ) A_ = None # down A_ = in_channels for i, down_block_type in enumerate(snake_case__ ): A_ = output_channel A_ = block_out_channels[i] if i == 0: input_channel += extra_in_channels A_ = i == len(snake_case__ ) - 1 A_ = get_down_block( snake_case__ , num_layers=snake_case__ , in_channels=snake_case__ , out_channels=snake_case__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(snake_case__ ) # mid A_ = get_mid_block( snake_case__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=snake_case__ , add_downsample=snake_case__ , ) # up A_ = list(reversed(snake_case__ ) ) A_ = reversed_block_out_channels[0] if out_block_type is None: A_ = out_channels else: A_ = block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): A_ = output_channel A_ = ( reversed_block_out_channels[i + 1] if i < len(snake_case__ ) - 1 else final_upsample_channels ) A_ = i == len(snake_case__ ) - 1 A_ = get_up_block( snake_case__ , num_layers=snake_case__ , in_channels=snake_case__ , out_channels=snake_case__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(snake_case__ ) A_ = output_channel # out A_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 ) A_ = get_out_block( out_block_type=snake_case__ , num_groups_out=snake_case__ , embed_dim=block_out_channels[0] , out_channels=snake_case__ , act_fn=snake_case__ , fc_dim=block_out_channels[-1] // 4 , ) def lowerCamelCase__ ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[Any] = True , ) -> str: """simple docstring""" A_ = timestep if not torch.is_tensor(snake_case__ ): A_ = torch.tensor([timesteps] , dtype=torch.long , 
device=sample.device ) elif torch.is_tensor(snake_case__ ) and len(timesteps.shape ) == 0: A_ = timesteps[None].to(sample.device ) A_ = self.time_proj(snake_case__ ) if self.config.use_timestep_embedding: A_ = self.time_mlp(snake_case__ ) else: A_ = timestep_embed[..., None] A_ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) A_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down A_ = () for downsample_block in self.down_blocks: A_ = downsample_block(hidden_states=snake_case__ , temb=snake_case__ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: A_ = self.mid_block(snake_case__ , snake_case__ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): A_ = down_block_res_samples[-1:] A_ = down_block_res_samples[:-1] A_ = upsample_block(snake_case__ , res_hidden_states_tuple=snake_case__ , temb=snake_case__ ) # 5. post-process if self.out_block: A_ = self.out_block(snake_case__ , snake_case__ ) if not return_dict: return (sample,) return UNetaDOutput(sample=snake_case__ )
115
'''Tests for `RealmRetriever`: retrieval against a tiny in-memory block-record
store, answer-span matching, and save/load round-trips.

NOTE(review): local bindings were destructively rewritten — every assignment
binds `_lowerCAmelCase` while later lines read the intended names
(`tmpdirname`, `vocab_tokens`, `retriever`, `config`, `concat_inputs`,
`tokenizer`, ...), the base class `SCREAMING_SNAKE_CASE_` is undefined
(presumably `TestCase`), and every method is named `a`, so only the last
definition survives on the class. The original bindings are not recoverable
from this file alone; code is left byte-identical and only annotated.
'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
from datasets import Dataset

from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer


class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    def a ( self ):
        '''Per-test setup: build a temp dir with a Realm tokenizer vocab and an
        (empty) block-records directory.'''
        _lowerCAmelCase : Dict = tempfile.mkdtemp()
        _lowerCAmelCase : List[str] = 5

        # Realm tok
        _lowerCAmelCase : str = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'test',
            'question',
            'this',
            'is',
            'the',
            'first',
            'second',
            'third',
            'fourth',
            'fifth',
            'record',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , 'realm_tokenizer' )
        os.makedirs(snake_case__ , exist_ok=snake_case__ )
        _lowerCAmelCase : Dict = os.path.join(snake_case__ , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        _lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , 'realm_block_records' )
        os.makedirs(snake_case__ , exist_ok=snake_case__ )

    def a ( self ):
        '''Helper: load the tokenizer written in setup.'''
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer' ) )

    def a ( self ):
        '''Per-test teardown: remove the temp directory.'''
        shutil.rmtree(self.tmpdirname )

    def a ( self ):
        '''Helper: a RealmConfig sized to the dummy record store.'''
        _lowerCAmelCase : Dict = RealmConfig(num_block_records=self.num_block_records )
        return config

    def a ( self ):
        '''Helper: a tiny HF Dataset with ids, questions and answers.'''
        _lowerCAmelCase : List[Any] = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'question': ['foo', 'bar'],
                'answers': [['Foo', 'Bar'], ['Bar']],
            } )
        return dataset

    def a ( self ):
        '''Helper: six byte-string block records, the last deliberately longer.'''
        _lowerCAmelCase : str = np.array(
            [
                b'This is the first record',
                b'This is the second record',
                b'This is the third record',
                b'This is the fourth record',
                b'This is the fifth record',
                b'This is a longer longer longer record',
            ] , dtype=snake_case__ , )
        return block_records

    def a ( self ):
        '''Helper: a RealmRetriever over the dummy records and tokenizer.'''
        _lowerCAmelCase : Tuple = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def a ( self ):
        '''Retrieve two blocks and check the concatenated question+block encodings.'''
        _lowerCAmelCase : Optional[int] = self.get_config()
        _lowerCAmelCase : List[Any] = self.get_dummy_retriever()
        _lowerCAmelCase : Any = retriever.tokenizer

        _lowerCAmelCase : Tuple = np.array([0, 3] , dtype='long' )
        _lowerCAmelCase : Dict = tokenizer(['Test question'] ).input_ids
        _lowerCAmelCase : int = tokenizer(
            ['the fourth'] , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , ).input_ids
        _lowerCAmelCase : Tuple = config.reader_seq_len

        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = retriever(
            snake_case__ , snake_case__ , answer_ids=snake_case__ , max_length=snake_case__ , return_tensors='np' )

        self.assertEqual(len(snake_case__ ) , 2 )
        self.assertEqual(len(snake_case__ ) , 2 )
        self.assertEqual(len(snake_case__ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) ,
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) ,
            ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )

    def a ( self ):
        '''Check answer-span start/end positions across three retrieved blocks.'''
        _lowerCAmelCase : int = self.get_config()
        _lowerCAmelCase : int = self.get_dummy_retriever()
        _lowerCAmelCase : List[str] = retriever.tokenizer

        _lowerCAmelCase : List[Any] = np.array([0, 3, 5] , dtype='long' )
        _lowerCAmelCase : str = tokenizer(['Test question'] ).input_ids
        _lowerCAmelCase : Union[str, Any] = tokenizer(
            ['the fourth', 'longer longer'] , add_special_tokens=snake_case__ , return_token_type_ids=snake_case__ , return_attention_mask=snake_case__ , ).input_ids
        _lowerCAmelCase : Optional[int] = config.reader_seq_len

        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = retriever(
            snake_case__ , snake_case__ , answer_ids=snake_case__ , max_length=snake_case__ , return_tensors='np' )

        self.assertEqual([False, True, True] , snake_case__ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , snake_case__ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , snake_case__ )

    def a ( self ):
        '''Save the block records, then reload from a local path and a mocked hub path.'''
        _lowerCAmelCase : List[str] = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )

        # Test local path
        _lowerCAmelCase : Union[str, Any] = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records' ) )
        self.assertEqual(retriever.block_records[0] , b'This is the first record' )

        # Test mocked remote path
        with patch('transformers.models.realm.retrieval_realm.hf_hub_download' ) as mock_hf_hub_download:
            _lowerCAmelCase : Any = os.path.join(
                os.path.join(self.tmpdirname , 'realm_block_records' ) , _REALM_BLOCK_RECORDS_FILENAME )
            _lowerCAmelCase : str = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa' )
            self.assertEqual(retriever.block_records[0] , b'This is the first record' )
444
0
import requests


def send_slack_message(message_body: str, slack_url: str) -> None:
    """Post *message_body* to a Slack incoming-webhook URL.

    Args:
        message_body: Text of the message to post.
        slack_url: The incoming-webhook URL provided by Slack.

    Raises:
        ValueError: If Slack responds with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
716
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    """Convert a TensorFlow BigBird checkpoint into a PyTorch model and save it.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        big_bird_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: Directory/path where the converted model is written.
        is_trivia_qa: If True, build a question-answering head instead of the
            pre-training head (the checkpoint layouts differ).
    """
    # Initialise the PyTorch model from the JSON configuration.
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from the TF checkpoint into the PyTorch model.
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
125
0
def harmonic_series(n_term: str) -> list:
    """Return the first *n_term* terms of the harmonic series as strings.

    The first term is rendered as "1", subsequent terms as "1/k".

    Args:
        n_term: Number of terms, as a string (as read from ``input()``).
            An empty string yields an empty list.

    Returns:
        list[str]: e.g. ``harmonic_series("3") == ["1", "1/2", "1/3"]``.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # The very first term is "1"; every later term is a unit fraction.
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
441
import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class A ( UpperCamelCase_ ): def __init__( self : str ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : Optional[Any] =[] def lowerCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> List[Any]: """simple docstring""" self.events.append('on_init_end' ) def lowerCamelCase ( self : int , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Union[str, Any]: """simple docstring""" self.events.append('on_train_begin' ) def lowerCamelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Tuple , **lowercase_ : str ) -> Optional[Any]: """simple docstring""" self.events.append('on_train_end' ) def lowerCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , **lowercase_ : List[str] ) -> Dict: """simple docstring""" self.events.append('on_epoch_begin' ) def lowerCamelCase ( self : Dict , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[Any] , **lowercase_ : Dict ) -> Optional[Any]: """simple docstring""" self.events.append('on_epoch_end' ) def lowerCamelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any , **lowercase_ : Dict ) -> Tuple: """simple docstring""" self.events.append('on_step_begin' ) def lowerCamelCase ( self : List[str] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Tuple: """simple 
docstring""" self.events.append('on_step_end' ) def lowerCamelCase ( self : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ) -> Any: """simple docstring""" self.events.append('on_evaluate' ) def lowerCamelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : Dict ) -> Dict: """simple docstring""" self.events.append('on_predict' ) def lowerCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any] , **lowercase_ : Optional[int] ) -> Any: """simple docstring""" self.events.append('on_save' ) def lowerCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[int] , **lowercase_ : Optional[int] ) -> List[str]: """simple docstring""" self.events.append('on_log' ) def lowerCamelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] , **lowercase_ : str ) -> List[Any]: """simple docstring""" self.events.append('on_prediction_step' ) @require_torch class A ( unittest.TestCase ): def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" _lowerCamelCase : Dict =tempfile.mkdtemp() def lowerCamelCase ( self : Union[str, Any] ) -> int: """simple docstring""" shutil.rmtree(self.output_dir ) def lowerCamelCase ( self : Optional[Any] , lowercase_ : Optional[int]=0 , lowercase_ : int=0 , lowercase_ : Dict=64 , lowercase_ : List[str]=64 , lowercase_ : int=None , lowercase_ : Tuple=False , **lowercase_ : Dict ) -> Optional[int]: """simple docstring""" _lowerCamelCase : int =RegressionDataset(length=lowercase_ ) _lowerCamelCase : Optional[Any] =RegressionDataset(length=lowercase_ ) _lowerCamelCase : str =RegressionModelConfig(a=lowercase_ , b=lowercase_ ) _lowerCamelCase : Optional[Any] =RegressionPreTrainedModel(lowercase_ ) _lowerCamelCase : str =TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , 
**lowercase_ ) return Trainer( lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , ) def lowerCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : int ) -> str: """simple docstring""" self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) # Order doesn't matter _lowerCamelCase : Any =sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) _lowerCamelCase : Any =sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ ) for cba, cba in zip(lowercase_ , lowercase_ ): if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , lowercase_ ) elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ): self.assertEqual(lowercase_ , cba.__class__ ) elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ): self.assertEqual(cba.__class__ , lowercase_ ) else: self.assertEqual(lowercase_ , lowercase_ ) def lowerCamelCase ( self : Dict , lowercase_ : List[str] ) -> str: """simple docstring""" _lowerCamelCase : List[Any] =['on_init_end', 'on_train_begin'] _lowerCamelCase : Dict =0 _lowerCamelCase : str =len(trainer.get_eval_dataloader() ) _lowerCamelCase : List[Any] =['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(lowercase_ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if 
trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def lowerCamelCase ( self : Optional[Any] ) -> List[str]: """simple docstring""" _lowerCamelCase : Any =self.get_trainer() _lowerCamelCase : Optional[Any] =DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # Callbacks passed at init are added to the default callbacks _lowerCamelCase : Dict =self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _lowerCamelCase : List[str] =self.get_trainer(disable_tqdm=lowercase_ ) _lowerCamelCase : Tuple =DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def lowerCamelCase ( self : str ) -> Optional[Any]: """simple docstring""" _lowerCamelCase : int =DEFAULT_CALLBACKS.copy() + [ProgressCallback] _lowerCamelCase : List[str] =self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(lowercase_ ) expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) _lowerCamelCase : Tuple =self.get_trainer() _lowerCamelCase : Dict =trainer.pop_callback(lowercase_ ) self.assertEqual(cb.__class__ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) # We can also add, pop, or remove by instance _lowerCamelCase : Optional[int] =self.get_trainer() _lowerCamelCase : Any =trainer.callback_handler.callbacks[0] trainer.remove_callback(lowercase_ ) 
expected_callbacks.remove(lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) _lowerCamelCase : Any =self.get_trainer() _lowerCamelCase : Optional[int] =trainer.callback_handler.callbacks[0] _lowerCamelCase : Dict =trainer.pop_callback(lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) trainer.add_callback(lowercase_ ) expected_callbacks.insert(0 , lowercase_ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ ) def lowerCamelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=lowercase_ ) _lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _lowerCamelCase : List[Any] =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # Independent log/save/eval _lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _lowerCamelCase : Union[str, Any] =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) _lowerCamelCase : int =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() _lowerCamelCase : Optional[Any] =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) _lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() _lowerCamelCase : Any =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) _lowerCamelCase : Optional[int] =self.get_trainer(callbacks=[MyTestTrainerCallback] , 
evaluation_strategy='epoch' ) trainer.train() _lowerCamelCase : Optional[int] =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # A bit of everything _lowerCamelCase : Optional[Any] =self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() _lowerCamelCase : List[str] =trainer.callback_handler.callbacks[-2].events self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: _lowerCamelCase : Union[str, Any] =self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(lowercase_ ) in warn_mock.call_args[0][0]
464
0
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() lowercase : Tuple = logging.get_logger(__name__) lowercase : List[str] = """The Nymphenburg Palace is a beautiful palace in Munich!""" def SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase ): _UpperCamelCase = { '''attention_cell''': '''multi_head''', '''num_layers''': 4, '''units''': 1_024, '''hidden_size''': 768, '''max_length''': 512, '''num_heads''': 8, '''scaled''': True, '''dropout''': 0.1, '''use_residual''': True, '''embed_size''': 1_024, '''embed_dropout''': 0.1, '''word_embed''': None, '''layer_norm_eps''': 1E-5, '''token_type_vocab_size''': 2, } _UpperCamelCase = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py _UpperCamelCase = BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , 
output_attention=lowerCAmelCase , output_all_encodings=lowerCAmelCase , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , lowerCAmelCase ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later _UpperCamelCase = '''openwebtext_ccnews_stories_books_cased''' # Specify download folder to Gluonnlp's vocab _UpperCamelCase = os.path.join(get_home_dir() , '''models''' ) _UpperCamelCase = _load_vocab(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , cls=lowerCAmelCase ) _UpperCamelCase = nlp.model.BERTModel( lowerCAmelCase , len(lowerCAmelCase ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=lowerCAmelCase , use_token_type_embed=lowerCAmelCase , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=lowerCAmelCase , use_decoder=lowerCAmelCase , ) original_bort.load_parameters(lowerCAmelCase , cast_dtype=lowerCAmelCase , ignore_extra=lowerCAmelCase ) _UpperCamelCase = original_bort._collect_params_with_prefix() # Build our config 🤗 _UpperCamelCase = { '''architectures''': ['''BertForMaskedLM'''], '''attention_probs_dropout_prob''': predefined_args['''dropout'''], '''hidden_act''': '''gelu''', '''hidden_dropout_prob''': predefined_args['''dropout'''], '''hidden_size''': predefined_args['''embed_size'''], '''initializer_range''': 0.0_2, '''intermediate_size''': predefined_args['''hidden_size'''], '''layer_norm_eps''': predefined_args['''layer_norm_eps'''], '''max_position_embeddings''': predefined_args['''max_length'''], '''model_type''': '''bort''', '''num_attention_heads''': predefined_args['''num_heads'''], '''num_hidden_layers''': predefined_args['''num_layers'''], '''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa 
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa '''vocab_size''': len(lowerCAmelCase ), } _UpperCamelCase = BertConfig.from_dict(lowerCAmelCase ) _UpperCamelCase = BertForMaskedLM(lowerCAmelCase ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | 
`bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCAmelCase ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCAmelCase , lowerCAmelCase ): _UpperCamelCase = hf_param.shape _UpperCamelCase = to_torch(params[gluon_param] ) _UpperCamelCase = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param _UpperCamelCase = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) _UpperCamelCase = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) _UpperCamelCase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) _UpperCamelCase = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) _UpperCamelCase = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): _UpperCamelCase = hf_bort_model.bert.encoder.layer[i] # self attention _UpperCamelCase = layer.attention.self _UpperCamelCase = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) _UpperCamelCase = 
check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) _UpperCamelCase = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) _UpperCamelCase = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) _UpperCamelCase = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) _UpperCamelCase = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output _UpperCamelCase = layer.attention.output _UpperCamelCase = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) _UpperCamelCase = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) _UpperCamelCase = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) _UpperCamelCase = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate _UpperCamelCase = layer.intermediate _UpperCamelCase = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) _UpperCamelCase = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output _UpperCamelCase = layer.output _UpperCamelCase = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) _UpperCamelCase = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) _UpperCamelCase = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) _UpperCamelCase = check_and_map_params( 
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models _UpperCamelCase = RobertaTokenizer.from_pretrained('''roberta-base''' ) _UpperCamelCase = tokenizer.encode_plus(lowerCAmelCase )['''input_ids'''] # Get gluon output _UpperCamelCase = mx.nd.array([input_ids] ) _UpperCamelCase = original_bort(inputs=lowerCAmelCase , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCAmelCase ) _UpperCamelCase = BertModel.from_pretrained(lowerCAmelCase ) hf_bort_model.eval() _UpperCamelCase = tokenizer.encode_plus(lowerCAmelCase , return_tensors='''pt''' ) _UpperCamelCase = hf_bort_model(**lowerCAmelCase )[0] _UpperCamelCase = output_gluon[0].asnumpy() _UpperCamelCase = output_hf[0].detach().numpy() _UpperCamelCase = np.max(np.abs(hf_layer - gluon_layer ) ).item() _UpperCamelCase = np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , lowerCAmelCase ) if __name__ == "__main__": lowercase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase : List[Any] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
105
def permute(nums):
    """Return all permutations of *nums* (rotate-and-recurse strategy).

    The input list is mutated during the computation but restored to its
    original order before returning.
    """
    if len(nums) == 1:
        return [nums.copy()]
    result = []
    for _ in range(len(nums)):
        first = nums.pop(0)
        # Permute the remainder, then tack the removed element on the end.
        perms = permute(nums)
        for perm in perms:
            perm.append(first)
        result.extend(perms)
        nums.append(first)  # rotate: restore element for the next round
    return result


def permutea(nums):
    """Return all permutations of *nums* using in-place backtracking."""

    def backtrack(start):
        # One position fixed per recursion level; a full prefix is a result.
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the backtracking variant
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
105
1
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: """simple docstring""" __UpperCAmelCase : int = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") __UpperCAmelCase : Optional[int] = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(UpperCamelCase ): os.makedirs(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = model.state_dict() def to_tf_var_name(UpperCamelCase ): for patt, repl in iter(UpperCamelCase ): __UpperCAmelCase : Any = name.replace(UpperCamelCase , UpperCamelCase ) return f"bert/{name}" def create_tf_var(UpperCamelCase , UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : str = tf.dtypes.as_dtype(tensor.dtype ) __UpperCAmelCase : Tuple = tf.get_variable(dtype=UpperCamelCase , shape=tensor.shape , name=UpperCamelCase , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(UpperCamelCase ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __UpperCAmelCase : Union[str, Any] = to_tf_var_name(UpperCamelCase ) __UpperCAmelCase : List[str] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): __UpperCAmelCase : Tuple = torch_tensor.T __UpperCAmelCase : str = create_tf_var(tensor=UpperCamelCase , name=UpperCamelCase , session=UpperCamelCase ) tf.keras.backend.set_value(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = session.run(UpperCamelCase ) print(f"Successfully created {tf_name}: {np.allclose(UpperCamelCase , UpperCamelCase )}" ) __UpperCAmelCase : 
Optional[int] = tf.train.Saver(tf.trainable_variables() ) saver.save(UpperCamelCase , os.path.join(UpperCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) ) def _UpperCamelCase ( UpperCamelCase=None ) -> int: """simple docstring""" __UpperCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--model_name" , type=UpperCamelCase , required=UpperCamelCase , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=UpperCamelCase , default=UpperCamelCase , required=UpperCamelCase , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=UpperCamelCase , required=UpperCamelCase , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=UpperCamelCase , required=UpperCamelCase , help="Directory in which to save tensorflow model" ) __UpperCAmelCase : List[Any] = parser.parse_args(UpperCamelCase ) __UpperCAmelCase : List[str] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=UpperCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
77
"""Fast tokenizer class for GPT-NeoX."""

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast (Rust-backed) GPT-NeoX tokenizer based on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the serialized state disagrees with
        # the requested add_prefix_space setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, keeping only the last
        ``model_max_length`` tokens when it overflows."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
119
0
import cmath  # stdlib root of unity; replaces the former third-party mpmath dependency

import numpy as np


class SCREAMING_SNAKE_CASE__:
    """Multiply two polynomials (coefficient lists, lowest degree first)
    via the FFT / inverse-FFT in O(n log n).

    Fixes vs. the obfuscated original: duplicate ``__A`` parameter names
    (SyntaxError), ``np.loga`` -> ``np.log2``, private methods renamed back to
    ``__dft``/``__multiply`` so ``self.__dft(...)``/``self.__multiply()``
    resolve, the DFT corner case tests ``len(dft)`` instead of the 1-char
    selector string, ``__str__`` unpacks ``enumerate`` in the right order, and
    the zero-stripping loops no longer empty the list for all-zero polynomials.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Defensive copies; a falsy argument means the zero polynomial [0].
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Drop trailing zero coefficients but always keep at least one entry
        # (the original popped unconditionally and crashed on [0]).
        while len(self.polyA) > 1 and self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while len(self.polyB) > 1 and self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad both polynomials to the next power of two >= deg(A)+deg(B)+1.
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # Primitive c_max_length-th root of unity: e^(2*pi*i/n).
        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product, computed eagerly.
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative Cooley-Tukey DFT of polyA (which == 'A') or polyB."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol
            # First half of the butterfly.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of the butterfly.
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the transforms, then apply the inverse DFT."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case: nothing to invert for a length-1 transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]

        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions.
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions.
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol])
                        / (2 * current_root)
                    )
                current_root *= root
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack and round away floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Strip trailing zeros, keeping at least one coefficient.
        while len(inverce_c) > 1 and inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        # enumerate yields (index, value): index is the power, value the coefficient.
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
def SCREAMING_SNAKE_CASE(snake_case_: list) -> list:
    """Sort *snake_case_* in place with gnome sort and return it.

    Fixes vs. the obfuscated original: the body referenced an undefined name
    ``lst`` (the parameter is ``snake_case_``), and the swap assigned both
    values to a single name, so out-of-order elements were never exchanged.
    """
    if len(snake_case_) <= 1:
        return snake_case_

    i = 1
    while i < len(snake_case_):
        if snake_case_[i - 1] <= snake_case_[i]:
            i += 1
        else:
            # Swap the out-of-order pair and step back.
            snake_case_[i - 1], snake_case_[i] = snake_case_[i], snake_case_[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return snake_case_


if __name__ == "__main__":
    # The original guard called undefined names (gnome_sort, user_input,
    # unsorted); wired up to the actual function above.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(SCREAMING_SNAKE_CASE(unsorted))
25
1
"""Google BLEU (GLEU) metric for the `datasets` library."""
from typing import Dict, List

from nltk.translate import gleu_score

import datasets
from datasets import MetricInfo


# NOTE(review): the obfuscated source assigned all three constants below to a
# single name while the decorator and `_info` reference `_CITATION`,
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION`; canonical names restored.
_CITATION = """\
@misc{wu2016googles,
      title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
      author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
              and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
              Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
              Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
              Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
              and Jeffrey Dean},
      year={2016},
      eprint={1609.08144},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""

_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results[\"google_bleu\"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results[\"google_bleu\"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric(\"google_bleu\")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results[\"google_bleu\"], 2))
        0.4
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__(datasets.Metric):
    """Google BLEU (GLEU) metric, delegating to nltk's corpus_gleu."""

    # NOTE(review): both methods were named `_lowerCAmelCase`, so the second
    # shadowed the first and the `datasets.Metric` hooks `_info`/`_compute`
    # were never defined; canonical hook names restored.
    def _info(self) -> MetricInfo:
        """Describe the metric's inputs and metadata for the datasets library."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        """Compute corpus-level GLEU over tokenized hypotheses and references."""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
662
"""Image processor for DPT (depth estimation / semantic segmentation)."""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)  # module logger


def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    """Compute the (height, width) to resize *input_image* to.

    Scales toward *output_size* (optionally preserving aspect ratio by
    applying the less aggressive of the two scales to both axes) and snaps
    each dimension to the nearest multiple of *multiple*.

    NOTE(review): the obfuscated def was named `__UpperCamelCase` while the
    class below calls `get_resize_output_image_size`; name restored to match
    the call site.  Duplicate parameter names restored from body usage.
    """

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then adjust so the result stays
        # inside [min_val, max_val] when bounds are given.
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class SCREAMING_SNAKE_CASE__(BaseImageProcessor):
    """DPT image processor: resize (multiple-of constraint), rescale, normalize.

    NOTE(review): the obfuscated class inherited from itself (a NameError);
    the imported `BaseImageProcessor` is restored as the base.  All method
    signatures repeated a single parameter name; names restored from how the
    bodies use them.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image,
        size,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resample=PILImageResampling.BICUBIC,
        data_format=None,
        **kwargs,
    ):
        """Resize *image* to *size*, honoring aspect ratio / multiple-of constraints."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        keep_aspect_ratio=None,
        ensure_multiple_of=None,
        resample=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured resize/rescale/normalize pipeline and batch the result."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized: the original relied on and/or precedence, raising even
        # when do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """Turn model logits into per-image segmentation maps, optionally
        resized to *target_sizes* (one (h, w) per batch element)."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
662
1
"""Tests for SamProcessor (PyTorch, TensorFlow, and cross-framework)."""
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


# NOTE(review): the obfuscated source named all three TestCase classes
# identically and every test method `snake_case`, so only one test could ever
# run; distinct, descriptive names restored.  The pervasive undefined name
# `UpperCAmelCase_` is replaced by the values each call site needs, and the
# non-existent dtypes `np.uinta`/`np.floataa` by `np.uint8`/`np.float32`.
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random HWC PIL image built from a CHW uint8 array."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        input_feat_extract.pop('original_sizes')  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes')  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf')
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors='tf',
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors='tf'
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors='tf'
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors='pt'
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors='pt')['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input, return_tensors='pt')['pixel_values'].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors='tf')['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input, return_tensors='tf')['pixel_values'].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
706
from math import log

from scipy.constants import Boltzmann, physical_constants

# TEMPERATURE (unit = K).  NOTE(review): the obfuscated source bound this to a
# different name while the function body reads `T`; the name `T` is restored.
T = 300


def __UpperCAmelCase(
    donor_conc: float,
    acceptor_conc: float,
    intrinsic_conc: float,
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction.

    Uses Vbi = (kT/q) * ln(Nd * Na / ni^2).  The original signature repeated
    one parameter name three times (a SyntaxError); names restored from body.

    Raises:
        ValueError: if any concentration is non-positive, or if the donor or
            acceptor concentration does not exceed the intrinsic one.
    """
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive')
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive')
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive')
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            'Donor concentration should be greater than intrinsic concentration'
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            'Acceptor concentration should be greater than intrinsic concentration'
        )
    else:
        # kT in joules, divided by the electron charge (eV in J) -> volts.
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
685
0
"""Tiny smoke-test script: report how many CUDA GPUs are visible."""
import torch


def SCREAMING_SNAKE_CASE() -> None:
    """Print the number of CUDA devices this process can see.

    Prints 0 when CUDA is unavailable; returns None.
    """
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""")


if __name__ == "__main__":
    # Bug fix: the script previously called an undefined ``main()``.
    SCREAMING_SNAKE_CASE()
87
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments A_ : Optional[Any] = logging.getLogger(__name__) @dataclass class lowerCamelCase (A__ ): lowerCamelCase__ : Optional[float] = field( default=0.0 ,metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} ) lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'Whether to SortishSamler or not.'} ) lowerCamelCase__ : bool = field( default=A__ ,metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) lowerCamelCase__ : bool = field(default=A__ ,metadata={'help': 'whether to use adafactor'} ) lowerCamelCase__ : Optional[float] = field( default=A__ ,metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} ) lowerCamelCase__ : Optional[float] = field( default=A__ ,metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} ) lowerCamelCase__ : Optional[float] = field(default=A__ ,metadata={'help': 'Dropout probability. Goes into model.config.'} ) lowerCamelCase__ : Optional[float] = field( default=A__ ,metadata={'help': 'Attention dropout probability. Goes into model.config.'} ) lowerCamelCase__ : Optional[str] = field( default='linear' ,metadata={'help': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} ,)
196
0
"""simple docstring""" import numpy as np snake_case = [ ['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'k'], ['l', 'm', 'n', 'o', 'p'], ['q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z'], ] class UpperCamelCase : """simple docstring""" def __init__( self ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE = np.array(lowercase__ ) def A ( self , lowercase__ ) -> np.ndarray: """simple docstring""" SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = np.where(letter == self.SQUARE ) SCREAMING_SNAKE_CASE = np.concatenate([indexa + 1, indexa + 1] ) return indexes def A ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE = self.SQUARE[indexa - 1, indexa - 1] return letter def A ( self , lowercase__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE = message.lower() SCREAMING_SNAKE_CASE = message.replace(' ' , '' ) SCREAMING_SNAKE_CASE = message.replace('j' , 'i' ) SCREAMING_SNAKE_CASE = np.empty((2, len(lowercase__ )) ) for letter_index in range(len(lowercase__ ) ): SCREAMING_SNAKE_CASE = self.letter_to_numbers(message[letter_index] ) SCREAMING_SNAKE_CASE = numbers[0] SCREAMING_SNAKE_CASE = numbers[1] SCREAMING_SNAKE_CASE = first_step.reshape(2 * len(lowercase__ ) ) SCREAMING_SNAKE_CASE = '' for numbers_index in range(len(lowercase__ ) ): SCREAMING_SNAKE_CASE = int(second_step[numbers_index * 2] ) SCREAMING_SNAKE_CASE = int(second_step[(numbers_index * 2) + 1] ) SCREAMING_SNAKE_CASE = self.numbers_to_letter(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE = encoded_message + letter return encoded_message def A ( self , lowercase__ ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE = message.lower() message.replace(' ' , '' ) SCREAMING_SNAKE_CASE = np.empty(2 * len(lowercase__ ) ) for letter_index in range(len(lowercase__ ) ): SCREAMING_SNAKE_CASE = self.letter_to_numbers(message[letter_index] ) SCREAMING_SNAKE_CASE = numbers[0] SCREAMING_SNAKE_CASE = numbers[1] SCREAMING_SNAKE_CASE = first_step.reshape((2, len(lowercase__ )) ) 
SCREAMING_SNAKE_CASE = '' for numbers_index in range(len(lowercase__ ) ): SCREAMING_SNAKE_CASE = int(second_step[0, numbers_index] ) SCREAMING_SNAKE_CASE = int(second_step[1, numbers_index] ) SCREAMING_SNAKE_CASE = self.numbers_to_letter(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE = decoded_message + letter return decoded_message
406
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase_ ( SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ): # Initialise PyTorch model SCREAMING_SNAKE_CASE = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f'''Building PyTorch model from configuration: {config}''' ) SCREAMING_SNAKE_CASE = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint load_tf_weights_in_ta(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) snake_case = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
406
1
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True when ``ip_va_address`` is a valid dotted-quad IPv4 address.

    Valid means exactly four dot-separated parts, each made of digits only,
    with every numeric value in 0..255.

    >>> is_ip_va_address_valid("192.168.0.1")
    True
    >>> is_ip_va_address_valid("256.1.1.1")
    False
    """
    parts = ip_va_address.split(".")
    # Non-numeric parts are rejected outright instead of being silently
    # filtered out (the old list comprehension dropped them, relying on the
    # length check to fail).
    if len(parts) != 4 or not all(part.isdigit() for part in parts):
        return False
    # Bug fix: octets may be 0..255 — the original rejected 255, which is a
    # legal value (e.g. the broadcast address 255.255.255.255).
    return all(0 <= int(part) <= 255 for part in parts)


# Backward-compatible alias for the obfuscated name; also fixes the
# previously undefined ``is_ip_va_address_valid`` call in the script below.
lowerCAmelCase_ = is_ip_va_address_valid


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
486
"""ImageGPT image processor: resize, normalize to [-1, 1] and optionally
color-quantize pixels into palette-cluster indices."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

UpperCamelCase__ = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of ``a`` and ``b``.

    Returns a ``(len(a), len(b))`` matrix computed via the expansion
    (x - y)^2 = x^2 - 2xy + y^2 broadcast over both inputs.
    """
    b = b.T
    a_sq = np.sum(np.square(a), axis=1)
    b_sq = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    return a_sq[:, None] - 2 * ab + b_sq[None, :]


def color_quantize(x, clusters):
    """Map every RGB pixel in ``x`` to the index of its nearest cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class A(BaseImageProcessor):
    """ImageGPT image processor.

    NOTE(review): the obfuscated original gave both helpers above the same
    name, collapsed every method name to ``lowercase_`` and inherited from an
    undefined base; names are restored here to match the internal
    ``self.resize`` / ``self.normalize`` / ``color_quantize`` calls.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        # Color palette used for quantization; None disables the default.
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with "height" and "width")."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] to [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare a batch of images for ImageGPT.

        Every argument defaults to the value set in ``__init__`` when None.
        With ``do_color_quantize`` the output is cluster-index sequences of
        shape (batch_size, height * width); otherwise pixel arrays.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Bug fix: only convert when present — ``np.array(None)`` would make
        # the ``clusters is None`` validation below unreachable.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
486
1
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = torch.load(__A , map_location='cpu' ) _lowerCAmelCase : List[Any] = chkpt['model'] # We have the base model one level deeper than the original XLM repository _lowerCAmelCase : int = {} for k, v in state_dict.items(): if "pred_layer" in k: _lowerCAmelCase : Optional[int] = v else: _lowerCAmelCase : List[Any] = v _lowerCAmelCase : Tuple = chkpt['params'] _lowerCAmelCase : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__A , (torch.FloatTensor, numpy.ndarray) )} _lowerCAmelCase : List[str] = chkpt['dico_word2id'] _lowerCAmelCase : List[Any] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()} # Save pytorch-model _lowerCAmelCase : int = pytorch_dump_folder_path + '/' + WEIGHTS_NAME _lowerCAmelCase : Any = pytorch_dump_folder_path + '/' + CONFIG_NAME _lowerCAmelCase : Optional[Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file'] print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" ) torch.save(__A , __A ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(__A , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(__A , indent=2 ) + '\n' ) print(f"""Save vocab file to {pytorch_config_dump_path}""" ) with open(__A , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(__A , indent=2 ) + '\n' ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, 
required=True, help="""Path to the output PyTorch model.""" ) _lowerCAmelCase = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
701
"""simple docstring""" import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { """vocab_file""": """vocab.json""", """tokenizer_config_file""": """tokenizer_config.json""", """merges_file""": """merges.txt""", } _lowerCAmelCase = { """vocab_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json""" ), }, """tokenizer_config_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json""" ), }, """merges_file""": { """facebook/s2t-wav2vec2-large-en-de""": ( """https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt""" ), }, } _lowerCAmelCase = """</w>""" _lowerCAmelCase = """@@ """ def lowerCamelCase__ ( _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : List[str] = set() _lowerCAmelCase : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _lowerCAmelCase : Any = char return pairs # Speech2Text2 has no max input length _lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4} class __UpperCamelCase ( a__ ): _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,): '''simple docstring''' super().__init__( unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,) _lowerCAmelCase : List[Any] = do_lower_case with open(_A ,encoding='utf-8' ) as vocab_handle: _lowerCAmelCase : Optional[int] = json.load(_A ) _lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(F"""No merges files provided. 
{self.__class__.__name__} can only be used for decoding.""" ) _lowerCAmelCase : Optional[Any] = None _lowerCAmelCase : Tuple = None else: with open(_A ,encoding='utf-8' ) as merges_handle: _lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1] _lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges] _lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) ) _lowerCAmelCase : Union[str, Any] = {} @property def __lowerCamelCase ( self ): '''simple docstring''' return len(self.decoder ) def __lowerCamelCase ( self ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] _lowerCAmelCase : str = get_pairs(_A ) if not pairs: return token while True: _lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break _lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram _lowerCAmelCase : Union[str, Any] = [] _lowerCAmelCase : Dict = 0 while i < len(_A ): try: _lowerCAmelCase : Dict = word.index(_A ,_A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _lowerCAmelCase : Optional[Any] = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _lowerCAmelCase : Optional[Any] = tuple(_A ) _lowerCAmelCase : List[str] = new_word if len(_A ) == 1: break else: _lowerCAmelCase : List[str] = get_pairs(_A ) _lowerCAmelCase : Any = ' '.join(_A ) if word == "\n " + BPE_TOKEN_MERGES: _lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES if word.endswith(_A ): _lowerCAmelCase : Dict = word.replace(_A ,'' ) _lowerCAmelCase : str = word.replace(' ' ,_A ) _lowerCAmelCase : str = word return word def __lowerCamelCase ( self ,_A ): '''simple docstring''' 
if self.bpe_ranks is None: raise ValueError( 'This tokenizer was instantiated without a `merges.txt` file, so' ' that it can only be used for decoding, not for encoding.' 'Make sure to provide `merges.txt` file at instantiation to enable ' 'encoding.' ) if self.do_lower_case: _lowerCAmelCase : Optional[Any] = text.lower() _lowerCAmelCase : Tuple = text.split() _lowerCAmelCase : Union[str, Any] = [] for token in text: if token: split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) ) return split_tokens def __lowerCamelCase ( self ,_A ): '''simple docstring''' return self.encoder.get(_A ,self.encoder.get(self.unk_token ) ) def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token ) return result def __lowerCamelCase ( self ,_A ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = ' '.join(_A ) # make sure @@ tokens are concatenated _lowerCAmelCase : int = ''.join(string.split(_A ) ) return string def __lowerCamelCase ( self ,_A ,_A = None ): '''simple docstring''' if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _lowerCAmelCase : List[Any] = os.path.join( _A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _lowerCAmelCase : str = os.path.join( _A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_A ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' ) _lowerCAmelCase : str = 0 if self.bpe_ranks is None: return (vocab_file,) with open(_A ,'w' ,encoding='utf-8' ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' 
) _lowerCAmelCase : Dict = token_index writer.write(' '.join(_A ) + '\n' ) index += 1 return (vocab_file, merges_file)
16
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool lowerCAmelCase : int = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': 
'''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', '''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', 
'''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', 
'''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': '''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', } class SCREAMING_SNAKE_CASE__ ( __a ): '''simple docstring''' UpperCamelCase__ : List[Any] = '''facebook/nllb-200-distilled-600M''' UpperCamelCase__ : Any = ( '''This is a tool that translates text from a language to another. 
It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) UpperCamelCase__ : Dict = '''translator''' UpperCamelCase__ : Optional[Any] = AutoTokenizer UpperCamelCase__ : Optional[Any] = AutoModelForSeqaSeqLM UpperCamelCase__ : int = LANGUAGE_CODES UpperCamelCase__ : Optional[Any] = ['''text''', '''text''', '''text'''] UpperCamelCase__ : str = ['''text'''] def UpperCAmelCase_ ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple ) -> Union[str, Any]: if src_lang not in self.lang_to_code: raise ValueError(f'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(f'''{tgt_lang} is not a supported language.''' ) snake_case__ = self.lang_to_code[src_lang] snake_case__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( lowerCAmelCase__ , return_tensors="""pt""" , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Dict: return self.model.generate(**lowerCAmelCase__ ) def UpperCAmelCase_ ( self : Tuple , lowerCAmelCase__ : int ) -> str: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCAmelCase__ )
214
"""Singly linked list with indexed access, insertion, deletion and reversal."""
from typing import Any


class Node:
    """One element of a singly linked list, holding a single data item."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # reference to the next Node, or None at the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """A singly linked list; iteration yields the *data* stored in each node."""

    def __init__(self) -> None:
        self.head = None  # first Node, or None when the list is empty

    def __iter__(self) -> Any:
        # Yield node payloads (not the Node objects) from head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        # O(n): the list keeps no cached size.
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at ``index`` (0-based).

        Raises:
            ValueError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, value in enumerate(self):
            if i == index:
                return value
        return None  # unreachable: the bounds check guarantees a hit

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at ``index`` (0-based).

        Raises:
            ValueError: if ``index`` is out of range.
        """
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the last node."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it ends up at position ``index``.

        ``index`` may equal ``len(self)`` (append).

        Raises:
            IndexError: if ``index`` is out of range.
        """
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first item."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the last item."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at ``index`` and return its data.

        Raises:
            IndexError: if ``index`` is out of range (including an empty list).
        """
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing every ``next`` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # prev is now the old tail, i.e. the new head
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the core LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads, including Nodes and None."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo of the LinkedList operations (reads from stdin)."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
214
1
'''simple docstring''' from __future__ import annotations import math def lowercase_ ( __A : list , __A : list ) -> list: """simple docstring""" if len(__A ) != 2 or len(a[0] ) != 2 or len(__A ) != 2 or len(b[0] ) != 2: raise Exception('''Matrices are not 2x2''' ) lowercase : Optional[Any] =[ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def lowercase_ ( __A : list , __A : list ) -> Optional[Any]: """simple docstring""" return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__A ) ) ] def lowercase_ ( __A : list , __A : list ) -> Any: """simple docstring""" return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__A ) ) ] def lowercase_ ( __A : list ) -> tuple[list, list, list, list]: """simple docstring""" if len(__A ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('''Odd matrices are not supported!''' ) lowercase : str =len(__A ) lowercase : Optional[Any] =matrix_length // 2 lowercase : str =[[a[i][j] for j in range(__A , __A )] for i in range(__A )] lowercase : Union[str, Any] =[ [a[i][j] for j in range(__A , __A )] for i in range(__A , __A ) ] lowercase : str =[[a[i][j] for j in range(__A )] for i in range(__A )] lowercase : Dict =[[a[i][j] for j in range(__A )] for i in range(__A , __A )] return top_left, top_right, bot_left, bot_right def lowercase_ ( __A : list ) -> tuple[int, int]: """simple docstring""" return len(__A ), len(matrix[0] ) def lowercase_ ( __A : list ) -> None: """simple docstring""" print('''\n'''.join(str(__A ) for line in matrix ) ) def lowercase_ ( __A : list , __A : list ) -> list: """simple docstring""" if matrix_dimensions(__A ) == (2, 2): return default_matrix_multiplication(__A , __A ) lowercase : Any =split_matrix(__A ) lowercase : Optional[Any] =split_matrix(__A ) lowercase : Any 
=actual_strassen(__A , matrix_subtraction(__A , __A ) ) lowercase : Optional[Any] =actual_strassen(matrix_addition(__A , __A ) , __A ) lowercase : int =actual_strassen(matrix_addition(__A , __A ) , __A ) lowercase : Tuple =actual_strassen(__A , matrix_subtraction(__A , __A ) ) lowercase : List[Any] =actual_strassen(matrix_addition(__A , __A ) , matrix_addition(__A , __A ) ) lowercase : Any =actual_strassen(matrix_subtraction(__A , __A ) , matrix_addition(__A , __A ) ) lowercase : Optional[Any] =actual_strassen(matrix_subtraction(__A , __A ) , matrix_addition(__A , __A ) ) lowercase : Optional[int] =matrix_addition(matrix_subtraction(matrix_addition(__A , __A ) , __A ) , __A ) lowercase : Union[str, Any] =matrix_addition(__A , __A ) lowercase : Dict =matrix_addition(__A , __A ) lowercase : int =matrix_subtraction(matrix_subtraction(matrix_addition(__A , __A ) , __A ) , __A ) # construct the new matrix from our 4 quadrants lowercase : str =[] for i in range(len(__A ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__A ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def lowercase_ ( __A : list , __A : list ) -> list: """simple docstring""" if matrix_dimensions(__A )[1] != matrix_dimensions(__A )[0]: lowercase : Dict =( '''Unable to multiply these matrices, please check the dimensions.\n''' F'Matrix A: {matrixa}\n' F'Matrix B: {matrixa}' ) raise Exception(__A ) lowercase : Dict =matrix_dimensions(__A ) lowercase : Optional[int] =matrix_dimensions(__A ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowercase : Optional[Any] =max(*__A , *__A ) lowercase : int =int(math.pow(2 , math.ceil(math.loga(__A ) ) ) ) lowercase : List[Any] =matrixa lowercase : Union[str, Any] =matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __A ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __A ): new_matrixa[i].append(0 ) 
else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __A ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowercase : Optional[int] =actual_strassen(__A , __A ) # Removing the additional zeros for i in range(0 , __A ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __A ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": SCREAMING_SNAKE_CASE = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] SCREAMING_SNAKE_CASE = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
703
"""Tests that TRANSFORMERS_OFFLINE=1 serves cached models without network access.

Each test runs a small Python program in a subprocess; the "mock" snippet replaces
socket.socket so any network attempt raises, proving the load came from the cache.
"""
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        """Cached config/model/tokenizer/pipeline must load with sockets disabled."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """Even without TRANSFORMERS_OFFLINE, cached files must survive flaky sockets."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
"""

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """Sharded checkpoints must also resolve from the cache in offline mode."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """pipeline() without an explicit task must fail clearly in offline mode."""
        load = """
from transformers import pipeline
"""

        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
"""

        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
"""

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """Models with remote code (trust_remote_code=True) must load from the cache."""
        load = """
from transformers import AutoModel
"""

        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
"""

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
8
0
"""Lazy-loading package init for mT5: defers heavy framework imports until first use."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

# mT5 reuses the T5 tokenizer; re-export it under the MT5 name.
MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

# Import structure consumed by _LazyModule: submodule name -> public symbols.
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = [
        "FlaxMT5EncoderModel",
        "FlaxMT5ForConditionalGeneration",
        "FlaxMT5Model",
    ]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    # Replace this module with a lazy proxy; the eagerly created tokenizer
    # aliases are attached via extra_objects so they stay importable.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
12
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A `TrainerCallback` that records, in order, every event it receives."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a tiny regression `Trainer` writing to the per-test temp dir.

        Extra `kwargs` are forwarded to `TrainingArguments` (logging/save/eval steps...).
        """
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert two callback lists match; entries may be classes or instances."""
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Replay the trainer's schedule to compute the event sequence a recording callback should see."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        # Train and eval datasets have the same length in these tests.
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
12
1
"""Byte-level BPE tokenizer for Longformer (GPT-2/RoBERTa style)."""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from each of the 256 byte values to a printable unicode
    character, so byte-level BPE is reversible and avoids whitespace/control
    characters inside BPE symbols."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Shift the remaining bytes into the printable range above 255.
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    """Longformer tokenizer: byte-level Byte-Pair-Encoding, derived from the
    GPT-2/RoBERTa tokenizer. Requires a `vocab.json` and a `merges.txt`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line of merges.txt is a "#version" header; last entry is empty.
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single pre-tokenized token; result is cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (i.e. most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back to a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Single sequence: `<s> X </s>`; pair: `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Longformer does not use token type ids; the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # A leading space is significant for byte-level BPE: prepend one unless
        # the text already starts with whitespace.
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
707
'''simple docstring''' import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("Googling.....") a : str = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) a : List[Any] = requests.get(url, headers={"UserAgent": UserAgent().random}) # res.raise_for_status() with open("project1a.html", "wb") as out_file: # only for knowing the class for data in res.iter_content(1_00_00): out_file.write(data) a : Any = BeautifulSoup(res.text, "html.parser") a : int = list(soup.select(".eZt8xd"))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("href")) else: webbrowser.open(F'https://google.com{link.get("href")}')
609
0
from ... import PretrainedConfig snake_case__ : Union[str, Any] = { 'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json', } class _a ( A__ ): """simple docstring""" snake_case =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP snake_case ="""nezha""" def __init__( self , _snake_case=2_1128 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=64 , _snake_case=2 , _snake_case=0.02 , _snake_case=1E-1_2 , _snake_case=0.1 , _snake_case=0 , _snake_case=2 , _snake_case=3 , _snake_case=True , **_snake_case , ): super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case ) _UpperCAmelCase =vocab_size _UpperCAmelCase =hidden_size _UpperCAmelCase =num_hidden_layers _UpperCAmelCase =num_attention_heads _UpperCAmelCase =hidden_act _UpperCAmelCase =intermediate_size _UpperCAmelCase =hidden_dropout_prob _UpperCAmelCase =attention_probs_dropout_prob _UpperCAmelCase =max_position_embeddings _UpperCAmelCase =max_relative_position _UpperCAmelCase =type_vocab_size _UpperCAmelCase =initializer_range _UpperCAmelCase =layer_norm_eps _UpperCAmelCase =classifier_dropout _UpperCAmelCase =use_cache
408
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Dataset of token-id sequences for LM distillation.

    On construction it sanity-checks the data, splits over-long sequences,
    drops empty and unknown-heavy sequences, and exposes `batch_sequences`
    as a collate function producing padded tensors.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity-check: lengths must match the stored sequences."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than `max_model_input_size` into chunks,
        re-adding the start/end special tokens on each chunk."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Reserve 2 positions per chunk for the re-added special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer (too short to be useful)."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences whose proportion of unknown tokens is 50% or more."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log basic statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) samples into padded tensors.

        Returns `(token_ids, lengths)` with shapes (bs, max_seq_len_) and (bs,).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
408
1
"""Tests for the Flax RoFormer models."""
import unittest

import numpy as np

from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )


class FlaxRoFormerModelTester(unittest.TestCase):
    """Builds a tiny RoFormer config and random inputs for the common tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    # NOTE(review): the original boolean class flag was obfuscated; restored as
    # `test_head_masking` — confirm against the upstream test file.
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        # Golden slice from the reference PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
123
"""Abstract base class for CLI sub-commands.

NOTE(review): the obfuscated fragment inherited from an undefined name ``a``
and used ``Optional[Any]`` without importing ``typing``.  ``ABC`` was already
imported (and unused), so it is restored as the base; the typing import is
added so the annotations resolve.
"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
from typing import Any, Optional


class UpperCamelCase__(ABC):
    """Contract for a command: register CLI arguments, then run.

    NOTE(review): both abstract methods carry the same obfuscated name
    ``snake_case``; the second definition shadows the first, so only the
    instance method survives as the abstract member.  The original names
    are unrecoverable from this fragment, so the shadowing is preserved.
    """

    @staticmethod
    @abstractmethod
    def snake_case(SCREAMING_SNAKE_CASE: ArgumentParser) -> Optional[Any]:
        # Presumably registers this command's arguments on the parser — TODO confirm.
        raise NotImplementedError()

    @abstractmethod
    def snake_case(self) -> Optional[Any]:
        # Presumably executes the command — TODO confirm.
        raise NotImplementedError()
123
1
'''simple docstring''' import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _a : '''simple docstring''' @staticmethod def UpperCamelCase_ ( *A, **A ): '''simple docstring''' pass @is_pipeline_test @require_vision class _a ( unittest.TestCase ): '''simple docstring''' @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', ) SCREAMING_SNAKE_CASE : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : Optional[int] = image_classifier(A, candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. 
self.assertIn( nested_simplify(A ), [ [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'c'}, {'score': 0.3_33, 'label': 'b'}], ], ) SCREAMING_SNAKE_CASE : Union[str, Any] = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], ], ) @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification', framework='tf' ) SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : Optional[int] = image_classifier(A, candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(A ), [{'score': 0.3_33, 'label': 'a'}, {'score': 0.3_33, 'label': 'b'}, {'score': 0.3_33, 'label': 'c'}], ) SCREAMING_SNAKE_CASE : Any = image_classifier([image] * 5, candidate_labels=['A', 'B', 'C'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, 
{'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], [ {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, {'score': 0.3_33, 'label': ANY(A )}, ], ], ) @slow @require_torch def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = pipeline( task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : List[str] = image_classifier(A, candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(A ), [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ) SCREAMING_SNAKE_CASE : List[Any] = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5, ) @slow @require_tf def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = pipeline( task='zero-shot-image-classification', model='openai/clip-vit-base-patch32', framework='tf' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE : List[Any] = image_classifier(A, candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(A ), [ {'score': 0.5_11, 'label': 'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ) SCREAMING_SNAKE_CASE : Tuple = image_classifier([image] * 5, candidate_labels=['cat', 'plane', 'remote'], batch_size=2 ) self.assertEqual( nested_simplify(A ), [ [ {'score': 0.5_11, 'label': 
'remote'}, {'score': 0.4_85, 'label': 'cat'}, {'score': 0.0_04, 'label': 'plane'}, ], ] * 5, )
28
"""Configuration class for the BiT (Big Transfer) vision backbone.

NOTE(review): the obfuscated fragment declared a duplicate base class
(``a_ , a_`` — a TypeError at class creation), gave every ``__init__``
parameter the same name (a SyntaxError), and discarded every argument into
a throwaway local instead of setting it on ``self``.  The parameter and
attribute names below are reconstructed from the names the body actually
reads (``num_channels``, ``layer_type``, ``self.layer_types``, ...).
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

# NOTE(review): the obfuscation collapsed two module globals (the logger and
# the pretrained-config map) onto one name; the second assignment shadows the
# first.  Preserved as-is because nothing in this fragment reads either.
__UpperCamelCase : Tuple = logging.get_logger(__name__)

__UpperCamelCase : Optional[int] = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class SCREAMING_SNAKE_CASE(BackboneConfigMixin, PretrainedConfig):
    """Stores the configuration of a BiT model.

    Args:
        num_channels: Number of input image channels.
        embedding_size: Dimensionality of the stem embedding.
        hidden_sizes: Channel count of each stage.
        depths: Number of layers in each stage.
        layer_type: One of ``layer_types`` ("preactivation" or "bottleneck").
        hidden_act: Activation function name.
        global_padding: Optional padding strategy, one of ``supported_padding``.
        num_groups: Groups for the group-norm layers.
        drop_path_rate: Stochastic-depth rate.
        embedding_dynamic_padding: Whether the stem uses dynamic padding.
        output_stride: Output stride of the backbone.
        width_factor: Channel-width multiplier.
        out_features / out_indices: Which stages the backbone exposes.
    """

    # ``model_type`` is the PretrainedConfig registry key — reconstructed from
    # the "bit" literal; the other two attributes are read by __init__ below.
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],  # mutable defaults kept for interface fidelity; read-only here
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                # Normalize to the canonical upper-case spelling.
                global_padding = global_padding.upper()
            else:
                raise ValueError(f'Padding strategy {global_padding} not supported')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # "stem" plus one named entry per stage, 1-indexed.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
450
0
"""Low-level terminal cursor/console helpers (ANSI escape sequences).

NOTE(review): the obfuscated fragment did not parse — every function was
renamed ``A__`` with duplicate parameter names, and the two module globals
were both named ``lowercase``.  The names below are reconstructed from the
identifiers the bodies actually reference (``forceWrite``, ``reset_cursor``,
``TERMINAL_WIDTH``, ``CURSOR_TO_CHAR``); names not referenced anywhere
(``writeColor``, ``move_cursor``, ``clear_line``, ``linebreak``) are
best-guess reconstructions — TODO confirm against the original file.
"""
import enum
import shutil
import sys

# Terminal geometry; only the width is used by the helpers below.
TERMINAL_WIDTH, _TERMINAL_HEIGHT = shutil.get_terminal_size()

# Final letter of the ANSI cursor-movement escape for each direction.
CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class SCREAMING_SNAKE_CASE_(enum.Enum):
    # Member names guessed from the CURSOR_TO_CHAR keys — TODO confirm.
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    """Write ``content`` to stdout and flush immediately (no line buffering)."""
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    """Write ``content`` wrapped in the ANSI color escape for ``color``."""
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    """Return the cursor to the start of the current line."""
    forceWrite("\r")


def move_cursor(num_lines, direction):
    """Move the cursor ``num_lines`` in ``direction`` ('up'/'down'/'right'/'left')."""
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    """Blank out the current line and return the cursor to its start."""
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    """Draw a horizontal rule across the full terminal width."""
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)


# Backward-compatible alias: in the obfuscated fragment the last surviving
# definition of ``A__`` was this final helper.
A__ = linebreak
712
"""Trial-division primality test (6k±1 optimization) plus its unit tests.

NOTE(review): the obfuscated fragment asserted ``isinstance(number, number)``
(a TypeError on every call), and the test class called the undefined names
``is_prime`` and ``lowerCamelCase__``.  Both are repaired below; the original
``A__`` name is kept as an alias.
"""
import math
import unittest


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime.

    Raises:
        AssertionError: if ``number`` is not a non-negative int.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # Every prime > 3 has the form 6k ± 1, so only those candidates are tried.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compatible alias for the obfuscated name.
A__ = is_prime


class SCREAMING_SNAKE_CASE_(unittest.TestCase):
    # NOTE(review): the obfuscated methods shared one non-``test``-prefixed
    # name, so unittest never discovered them; renamed so they actually run.
    def test_primes(self) -> None:
        """Known small primes are accepted."""
        for prime in (2, 3, 5, 7, 11, 13, 17, 19, 23, 29):
            self.assertTrue(is_prime(prime))

    def test_not_primes(self) -> None:
        """Non-primes are rejected; invalid input trips the precondition."""
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))


if __name__ == "__main__":
    unittest.main()
150
0
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch UpperCAmelCase =random.Random() def _A ( _a : List[Any] , _a : List[Any]=1.0 , _a : int=None , _a : Tuple=None ): """simple docstring""" if rng is None: A = global_rng A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=7 ,lowerCamelCase_=4_0_0 ,lowerCamelCase_=2_0_0_0 ,lowerCamelCase_=1_0 ,lowerCamelCase_=1_6_0 ,lowerCamelCase_=8 ,lowerCamelCase_=0.0 ,lowerCamelCase_=4_0_0_0 ,lowerCamelCase_=False ,lowerCamelCase_=True ,) -> Any: A = parent A = batch_size A = min_seq_length A = max_seq_length A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) A = padding_value A = sampling_rate A = return_attention_mask A = do_normalize A = feature_size A = chunk_length A = hop_length def UpperCamelCase__ ( self ) -> str: return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self ,lowerCamelCase_=False ,lowerCamelCase_=False ) -> int: def _flatten(lowerCamelCase_ ): return list(itertools.chain(*lowerCamelCase_ ) ) if 
equal_length: A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff ) ] if numpify: A = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = WhisperFeatureExtractor if is_speech_available() else None def UpperCamelCase__ ( self ) -> Union[str, Any]: A = WhisperFeatureExtractionTester(self ) def UpperCamelCase__ ( self ) -> List[Any]: A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) A = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) A = feat_extract_first.to_dict() A = feat_extract_second.to_dict() A = feat_extract_first.mel_filters A = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> int: A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: A = os.path.join(lowerCamelCase_ ,"""feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) A = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) A = feat_extract_first.to_dict() A = feat_extract_second.to_dict() A = feat_extract_first.mel_filters A = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> Optional[int]: # Tests that all call wrap to encode_plus and batch_encode_plus A = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 A = [floats_list((1, x) )[0] for x in range(8_0_0 ,1_4_0_0 ,2_0_0 )] A = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test feature size A = feature_extractor(lowerCamelCase_ ,padding="""max_length""" ,return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input A = feature_extractor(speech_inputs[0] ,return_tensors="""np""" ).input_features A = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ).input_features self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1E-3 ) ) # Test batched A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ ,lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
A = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] A = np.asarray(lowerCamelCase_ ) A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ ,lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1E-3 ) ) # Test truncation required A = [floats_list((1, x) )[0] for x in range(2_0_0 ,(feature_extractor.n_samples + 5_0_0) ,2_0_0 )] A = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] A = [x[: feature_extractor.n_samples] for x in speech_inputs] A = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs_truncated] A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features A = feature_extractor(lowerCamelCase_ ,return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ ,lowerCamelCase_ ): self.assertTrue(np.allclose(lowerCamelCase_ ,lowerCamelCase_ ,atol=1E-3 ) ) def UpperCamelCase__ ( self ) -> Optional[Any]: import torch A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A = np.random.rand(1_0_0 ,3_2 ).astype(np.floataa ) A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: A = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) A = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> int: A = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" ) # automatic decoding with librispeech A = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self ) -> List[str]: 
# fmt: off A = torch.tensor( [ 0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51, 0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78, 0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54, -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54 ] ) # fmt: on A = self._load_datasamples(1 ) A = WhisperFeatureExtractor() A = feature_extractor(lowerCamelCase_ ,return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape ,(1, 8_0, 3_0_0_0) ) self.assertTrue(torch.allclose(input_features[0, 0, :3_0] ,lowerCamelCase_ ,atol=1E-4 ) ) def UpperCamelCase__ ( self ) -> Any: A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) A = self._load_datasamples(1 )[0] A = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue A = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=lowerCamelCase_ )[0] self.assertTrue(np.all(np.mean(lowerCamelCase_ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCamelCase_ ) - 1 ) < 1E-3 ) )
617
"""simple docstring""" import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right UpperCAmelCase =50_003 UpperCAmelCase =50_002 @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' _lowerCamelCase = PLBartTokenizer _lowerCamelCase = None _lowerCamelCase = False def UpperCamelCase__ ( self ) -> Tuple: super().setUp() # We have a SentencePiece fixture for testing A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ) -> int: A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ ) A = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,) A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) 
self.assertListEqual( lowerCamelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] ,) A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) A = tokenizer.vocab_size A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 4 ,lowerCamelCase_ )] self.assertListEqual(lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A = tokenizer(lowerCamelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,) def UpperCamelCase__ ( self ) -> Optional[Any]: A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""multi""" ,keep_accents=lowerCamelCase_ ) A = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,) A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + 
"""this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ ,[ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] ,) A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) A = tokenizer.vocab_size A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 7 ,lowerCamelCase_ )] self.assertListEqual( lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" A = tokenizer(lowerCamelCase_ ).input_ids self.assertEqual( tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' _lowerCamelCase = '''uclanlp/plbart-python-en_XX''' _lowerCamelCase = [ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] _lowerCamelCase = [ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] _lowerCamelCase = [ 134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456, 771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE, ] 
@classmethod def UpperCamelCase__ ( cls ) -> List[str]: A = PLBartTokenizer.from_pretrained( cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" ) A = 1 return cls def UpperCamelCase__ ( self ) -> Optional[int]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,5_0_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,5_0_0_0_3 ) def UpperCamelCase__ ( self ) -> Optional[int]: A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> str: self.assertIn(lowerCamelCase_ ,self.tokenizer.all_special_ids ) A = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2] A = self.tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ) A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> List[str]: A = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0] self.assertIsInstance(src_text[0] ,lowerCamelCase_ ) A = 1_0 A = self.tokenizer(lowerCamelCase_ ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[-2] ,2 ) self.assertEqual(ids[-1] ,lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) def UpperCamelCase__ ( self ) -> Any: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[5_0_0_0_4, 5_0_0_0_1] ) def UpperCamelCase__ ( self ) -> Optional[int]: A = tempfile.mkdtemp() A = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) A = PLBartTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase_ ) 
@require_torch def UpperCamelCase__ ( self ) -> Optional[int]: A = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,return_tensors="""pt""" ) A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] ,lowerCamelCase_ ) self.assertEqual(batch.decoder_input_ids[1][-1] ,2 ) self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] ) @require_torch def UpperCamelCase__ ( self ) -> str: A = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ ) self.assertEqual((2, 2_6) ,batch.input_ids.shape ) self.assertEqual((2, 2_6) ,batch.attention_mask.shape ) A = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ ) self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] ) def UpperCamelCase__ ( self ) -> Tuple: A = self.tokenizer(self.src_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=3 ,return_tensors="""pt""" ) A = self.tokenizer( text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=1_0 ,return_tensors="""pt""" ) A = targets["""input_ids"""] A = shift_tokens_right(lowerCamelCase_ ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0 ) @require_torch def UpperCamelCase__ ( self ) -> List[Any]: A = 
self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" ) self.assertEqual( nested_simplify(lowerCamelCase_ ) ,{ # A, test, EOS, en_XX """input_ids""": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 5_0_0_0_1, } ,)
617
1
import functools


def lowerCamelCase_(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover every travel day with 1-, 7-, or 30-day passes.

    NOTE(review): the obfuscated fragment gave both parameters the same name
    (a SyntaxError); ``days``/``costs`` are reconstructed from the identifiers
    the body reads.

    Args:
        days: Days of the year (1..365) on which travel happens.
        costs: Exactly three pass prices: [1-day, 7-day, 30-day].

    Returns:
        The minimum total cost covering all travel days.

    Raises:
        ValueError: on malformed ``days``/``costs`` or out-of-range days.
    """
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)  # O(1) membership test per day of the year

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Cheapest way to cover all travel days from ``index`` onward.
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
167
def lowerCamelCase_ ( UpperCamelCase__ : int ) -> int: """simple docstring""" assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), F"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: __lowerCamelCase = F"""The input value of [n={number}] has to be > 0""" raise ValueError(UpperCamelCase__ ) else: __lowerCamelCase = sylvester(number - 1 ) __lowerCamelCase = num - 1 __lowerCamelCase = num return lower * upper + 1 if __name__ == "__main__": print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
167
1
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ): __SCREAMING_SNAKE_CASE : Any = (DDPMScheduler,) def _a (self , **lowercase ): A_ : List[str] = { 'num_train_timesteps': 1000, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**__a ) return config def _a (self ): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=__a ) def _a (self ): for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def _a (self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__a ) def _a (self ): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__a ) def _a (self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def _a (self ): self.check_over_configs(thresholding=__a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , ) def _a (self ): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def _a (self ): for t in [0, 500, 999]: self.check_over_forward(time_step=__a ) def _a (self ): A_ : Optional[int] = self.scheduler_classes[0] A_ : str = self.get_scheduler_config() A_ : Optional[Any] = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def _a (self ): A_ : Dict = self.scheduler_classes[0] A_ : str = self.get_scheduler_config() A_ : int = scheduler_class(**__a ) A_ : Tuple 
= len(__a ) A_ : Dict = self.dummy_model() A_ : Optional[Any] = self.dummy_sample_deter A_ : List[Any] = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual A_ : Optional[int] = model(__a , __a ) # 2. predict previous mean of sample x_t-1 A_ : Optional[int] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : Optional[int] = pred_prev_sample A_ : Dict = torch.sum(torch.abs(__a ) ) A_ : Tuple = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.33_72 ) < 1E-3 def _a (self ): A_ : Union[str, Any] = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config(prediction_type="""v_prediction""" ) A_ : List[str] = scheduler_class(**__a ) A_ : str = len(__a ) A_ : Optional[int] = self.dummy_model() A_ : Optional[int] = self.dummy_sample_deter A_ : Dict = torch.manual_seed(0 ) for t in reversed(range(__a ) ): # 1. predict noise residual A_ : List[Any] = model(__a , __a ) # 2. 
predict previous mean of sample x_t-1 A_ : Optional[Any] = scheduler.step(__a , __a , __a , generator=__a ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance A_ : int = pred_prev_sample A_ : Dict = torch.sum(torch.abs(__a ) ) A_ : int = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.26_31 ) < 1E-3 def _a (self ): A_ : List[str] = self.scheduler_classes[0] A_ : List[str] = self.get_scheduler_config() A_ : int = scheduler_class(**__a ) A_ : Optional[Any] = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__a ) A_ : Any = scheduler.timesteps for i, timestep in enumerate(__a ): if i == len(__a ) - 1: A_ : Dict = -1 else: A_ : Tuple = timesteps[i + 1] A_ : Optional[Any] = scheduler.previous_timestep(__a ) A_ : int = prev_t.item() self.assertEqual(__a , __a ) def _a (self ): A_ : List[Any] = self.scheduler_classes[0] A_ : List[Any] = self.get_scheduler_config() A_ : str = scheduler_class(**__a ) A_ : Any = [100, 87, 50, 51, 0] with self.assertRaises(__a , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__a ) def _a (self ): A_ : List[Any] = self.scheduler_classes[0] A_ : str = self.get_scheduler_config() A_ : Optional[Any] = scheduler_class(**__a ) A_ : Tuple = [100, 87, 50, 1, 0] A_ : List[str] = len(__a ) with self.assertRaises(__a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__a , timesteps=__a ) def _a (self ): A_ : List[str] = self.scheduler_classes[0] A_ : List[Any] = self.get_scheduler_config() A_ : Union[str, Any] = scheduler_class(**__a ) A_ : List[str] = [scheduler.config.num_train_timesteps] with self.assertRaises( __a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): 
scheduler.set_timesteps(timesteps=__a )
667
'''simple docstring''' import json import logging import os import sys from time import time from unittest.mock import patch from transformers.testing_utils import TestCasePlus, require_torch_tpu logging.basicConfig(level=logging.DEBUG) A__ : List[str] = logging.getLogger() def a_ ( _UpperCAmelCase : int ) -> Optional[int]: __snake_case : List[str] = {} __snake_case : List[str] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : Dict = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results A__ : Dict = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) @require_torch_tpu class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' import xla_spawn __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' ./examples/pytorch/text-classification/run_glue.py --num_cores=8 ./examples/pytorch/text-classification/run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --overwrite_output_dir --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --do_train --do_eval --debug tpu_metrics_debug --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --max_steps=10 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(__a , 'argv' , __a ): __snake_case : Tuple = time() xla_spawn.main() __snake_case : Optional[int] = time() __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) # Assert that the script takes less than 500 seconds to make sure it doesn't hang. 
self.assertLess(end - start , 500 ) def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' import xla_spawn __snake_case : Optional[Any] = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split() with patch.object(__a , 'argv' , __a ): xla_spawn.main()
286
0
'''simple docstring''' import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process A_ = logging.getLogger(__name__) A_ = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) A_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCAmelCase : '''simple docstring''' SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'The model checkpoint for weights initialization. Leave None if you want to train a model from' ' scratch.' ) } , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCAmelCase__ )} , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class UpperCAmelCase : '''simple docstring''' SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'The input training data file (a text file).'} ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={ 'help': ( 'The input training data files (multiple files in glob format). 
' 'Very often splitting large files to smaller files can prevent tokenizer going out of memory' ) } , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} ) SCREAMING_SNAKE_CASE_ = field(default=UpperCAmelCase__ , metadata={'help': 'Whether ot not to use whole word mask.'} ) SCREAMING_SNAKE_CASE_ = field( default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} ) SCREAMING_SNAKE_CASE_ = field( default=1 / 6 , metadata={ 'help': ( 'Ratio of length of a span of masked tokens to surrounding context length for permutation language' ' modeling.' ) } , ) SCREAMING_SNAKE_CASE_ = field( default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} ) SCREAMING_SNAKE_CASE_ = field( default=-1 , metadata={ 'help': ( 'Optional input sequence length after tokenization.' 'The training dataset will be truncated in block of this size for training.' 'Default to the model max input length for single sentence inputs (take into account special tokens).' 
) } , ) SCREAMING_SNAKE_CASE_ = field( default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,) -> Tuple: def _dataset(__UpperCamelCase ,__UpperCamelCase=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=__UpperCamelCase ,file_path=__UpperCamelCase ,block_size=args.block_size ,ref_path=__UpperCamelCase ,) return LineByLineTextDataset(tokenizer=__UpperCamelCase ,file_path=__UpperCamelCase ,block_size=args.block_size ) else: return TextDataset( tokenizer=__UpperCamelCase ,file_path=__UpperCamelCase ,block_size=args.block_size ,overwrite_cache=args.overwrite_cache ,cache_dir=__UpperCamelCase ,) if evaluate: return _dataset(args.eval_data_file ,args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(__UpperCamelCase ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file ,args.train_ref_file ) def _UpperCamelCase ( ) -> int: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' 
) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' ,__UpperCamelCase ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: lowerCamelCase_ = AutoConfig.from_pretrained(model_args.config_name ,cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowerCamelCase_ = AutoConfig.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir ) else: lowerCamelCase_ = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.tokenizer_name: lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name ,cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowerCamelCase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path ,cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: lowerCamelCase_ = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,) else: logger.info('Training new model from scratch' ) lowerCamelCase_ = AutoModelWithLMHead.from_config(__UpperCamelCase ) model.resize_token_embeddings(len(__UpperCamelCase ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' 
) if data_args.block_size <= 0: lowerCamelCase_ = tokenizer.max_len # Our input block size will be the max possible for the model else: lowerCamelCase_ = min(data_args.block_size ,tokenizer.max_len ) # Get datasets lowerCamelCase_ = ( get_dataset(__UpperCamelCase ,tokenizer=__UpperCamelCase ,cache_dir=model_args.cache_dir ) if training_args.do_train else None ) lowerCamelCase_ = ( get_dataset(__UpperCamelCase ,tokenizer=__UpperCamelCase ,evaluate=__UpperCamelCase ,cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": lowerCamelCase_ = DataCollatorForPermutationLanguageModeling( tokenizer=__UpperCamelCase ,plm_probability=data_args.plm_probability ,max_span_length=data_args.max_span_length ,) else: if data_args.mlm and data_args.whole_word_mask: lowerCamelCase_ = DataCollatorForWholeWordMask( tokenizer=__UpperCamelCase ,mlm_probability=data_args.mlm_probability ) else: lowerCamelCase_ = DataCollatorForLanguageModeling( tokenizer=__UpperCamelCase ,mlm=data_args.mlm ,mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowerCamelCase_ = Trainer( model=__UpperCamelCase ,args=__UpperCamelCase ,data_collator=__UpperCamelCase ,train_dataset=__UpperCamelCase ,eval_dataset=__UpperCamelCase ,prediction_loss_only=__UpperCamelCase ,) # Training if training_args.do_train: lowerCamelCase_ = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=__UpperCamelCase ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCamelCase_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) lowerCamelCase_ = trainer.evaluate() lowerCamelCase_ = math.exp(eval_output['eval_loss'] ) lowerCamelCase_ = 
{'perplexity': perplexity} lowerCamelCase_ = os.path.join(training_args.output_dir ,'eval_results_lm.txt' ) if trainer.is_world_master(): with open(__UpperCamelCase ,'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' ,__UpperCamelCase ,str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(__UpperCamelCase ) return results def _UpperCamelCase ( __UpperCamelCase ) -> Any: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
384
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets A_ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n" A_ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n" A_ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 
'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n" def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: return float((preds == labels).mean() ) def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str: lowerCamelCase_ = simple_accuracy(__UpperCamelCase ,__UpperCamelCase ) lowerCamelCase_ = float(fa_score(y_true=__UpperCamelCase ,y_pred=__UpperCamelCase ) ) return { "accuracy": acc, "f1": fa, } def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]: lowerCamelCase_ = np.array(__UpperCamelCase ) lowerCamelCase_ = np.array(__UpperCamelCase ) lowerCamelCase_ = en_sentvecs.shape[0] # mean centering lowerCamelCase_ = en_sentvecs - np.mean(__UpperCamelCase ,axis=0 ) lowerCamelCase_ = in_sentvecs - np.mean(__UpperCamelCase ,axis=0 ) lowerCamelCase_ = cdist(__UpperCamelCase ,__UpperCamelCase ,'cosine' ) lowerCamelCase_ = np.array(range(__UpperCamelCase ) ) lowerCamelCase_ = sim.argsort(axis=1 )[:, :10] lowerCamelCase_ = np.any(preds == actual[:, None] ,axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase ( datasets.Metric ): '''simple docstring''' def UpperCamelCase( self ) -> Tuple: '''simple docstring''' if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': 
datasets.Value('int64' ) if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32' ) ), 'references': datasets.Value('int64' ) if self.config_name != 'cvit-mkb-clsr' else datasets.Sequence(datasets.Value('float32' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int: '''simple docstring''' if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} else: raise KeyError( 'You should supply a configuration name selected in ' '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ' '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ' '"wiki-ner"]' )
384
1
from ...processing_utils import ProcessorMixin class _lowercase ( __lowerCamelCase ): _lowercase : int = 'SpeechT5FeatureExtractor' _lowercase : Optional[Any] = 'SpeechT5Tokenizer' def __init__( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(lowerCamelCase__ , lowerCamelCase__ ) def __call__( self : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ) -> Union[str, Any]: """simple docstring""" A_ = kwargs.pop('''audio''' , lowerCamelCase__ ) A_ = kwargs.pop('''text''' , lowerCamelCase__ ) A_ = kwargs.pop('''text_target''' , lowerCamelCase__ ) A_ = kwargs.pop('''audio_target''' , lowerCamelCase__ ) A_ = kwargs.pop('''sampling_rate''' , lowerCamelCase__ ) if audio is not None and text is not None: raise ValueError( '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' ) if audio_target is not None and text_target is not None: raise ValueError( '''Cannot process both `audio_target` and `text_target` inputs. 
Did you mean `audio` or `text`?''' ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' ) if audio is not None: A_ = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ ) elif text is not None: A_ = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ ) else: A_ = None if audio_target is not None: A_ = self.feature_extractor(audio_target=lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ ) A_ = targets['''input_values'''] elif text_target is not None: A_ = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ ) A_ = targets['''input_ids'''] else: A_ = None if inputs is None: return targets if targets is not None: A_ = labels A_ = targets.get('''attention_mask''' ) if decoder_attention_mask is not None: A_ = decoder_attention_mask return inputs def UpperCamelCase ( self : str , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : int ) -> Optional[Any]: """simple docstring""" A_ = kwargs.pop('''input_values''' , lowerCamelCase__ ) A_ = kwargs.pop('''input_ids''' , lowerCamelCase__ ) A_ = kwargs.pop('''labels''' , lowerCamelCase__ ) if input_values is not None and input_ids is not None: raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' ) if input_values is None and input_ids is None and labels is None: raise ValueError( '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' ) if input_values is not None: A_ = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) elif input_ids is not None: A_ = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ ) else: A_ = None if labels is not None: if "input_ids" in labels or (isinstance(lowerCamelCase__ , lowerCamelCase__ ) and "input_ids" in labels[0]): A_ = 
self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ ) A_ = targets['''input_ids'''] else: A_ = self.feature_extractor.feature_size A_ = self.feature_extractor.num_mel_bins A_ = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ ) A_ = feature_size_hack A_ = targets['''input_values'''] else: A_ = None if inputs is None: return targets if targets is not None: A_ = labels A_ = targets.get('''attention_mask''' ) if decoder_attention_mask is not None: A_ = decoder_attention_mask return inputs def UpperCamelCase ( self : Dict , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ ) def UpperCamelCase ( self : Any , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
203
__lowercase = """Alexander Joslin""" import operator as op from .stack import Stack def _lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' A_ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub} A_ = Stack() A_ = Stack() for i in equation: if i.isdigit(): # RULE 1 operand_stack.push(int(SCREAMING_SNAKE_CASE ) ) elif i in operators: # RULE 2 operator_stack.push(SCREAMING_SNAKE_CASE ) elif i == ")": # RULE 4 A_ = operator_stack.peek() operator_stack.pop() A_ = operand_stack.peek() operand_stack.pop() A_ = operand_stack.peek() operand_stack.pop() A_ = operators[opr](SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) operand_stack.push(SCREAMING_SNAKE_CASE ) # RULE 5 return operand_stack.peek() if __name__ == "__main__": __lowercase = """(5 + ((4 * 2) * (2 + 3)))""" # answer = 45 print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
203
1
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType UpperCamelCase , UpperCamelCase , UpperCamelCase = False, False, False @dataclass class lowercase_ : A__ : Optional[int] = None A__ : bool = True A__ : bool = True A__ : Optional[str] = None # Automatically constructed A__ : ClassVar[str] = "dict" A__ : ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) A__ : str = field(default='''Audio''', init=_UpperCAmelCase, repr=_UpperCAmelCase ) def __call__( self ) ->List[Any]: '''simple docstring''' return self.pa_type def lowerCamelCase__ ( self , a_ ) ->dict: '''simple docstring''' try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("To support encoding audio data, please install \'soundfile\'." 
) from err if isinstance(a_ , a_ ): return {"bytes": None, "path": value} elif isinstance(a_ , a_ ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes _a = BytesIO() sf.write(a_ , value["array"] , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("path" ) is not None and os.path.isfile(value["path"] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("pcm" ): # "PCM" only has raw audio bytes if value.get("sampling_rate" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("To use PCM files, please specify a \'sampling_rate\' in Audio object" ) if value.get("bytes" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) _a = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7 else: _a = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 3_2_7_6_7 _a = BytesIO(bytes() ) sf.write(a_ , a_ , value["sampling_rate"] , format="wav" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("path" )} elif value.get("bytes" ) is not None or value.get("path" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("bytes" ), "path": value.get("path" )} else: raise ValueError( f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def lowerCamelCase__ ( self , a_ , a_ = None ) ->dict: '''simple docstring''' if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." 
) _a , _a = (value["path"], BytesIO(value["bytes"] )) if value["bytes"] is not None else (value["path"], None) if path is None and file is None: raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("To support decoding audio files, please install \'librosa\' and \'soundfile\'." ) from err _a = xsplitext(a_ )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( "Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( "Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, " "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " ) if file is None: _a = token_per_repo_id or {} _a = path.split("::" )[-1] try: _a = string_to_dict(a_ , config.HUB_DATASETS_URL )["repo_id"] _a = token_per_repo_id[repo_id] except (ValueError, KeyError): _a = None with xopen(a_ , "rb" , use_auth_token=a_ ) as f: _a , _a = sf.read(a_ ) else: _a , _a = sf.read(a_ ) _a = array.T if self.mono: _a = librosa.to_mono(a_ ) if self.sampling_rate and self.sampling_rate != sampling_rate: _a = librosa.resample(a_ , orig_sr=a_ , target_sr=self.sampling_rate ) _a = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def lowerCamelCase__ ( self ) ->Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value if self.decode: raise ValueError("Cannot flatten a decoded Audio feature." 
) return { "bytes": Value("binary" ), "path": Value("string" ), } def lowerCamelCase__ ( self , a_ ) ->pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type ): _a = pa.array([None] * len(a_ ) , type=pa.binary() ) _a = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _a = pa.array([None] * len(a_ ) , type=pa.string() ) _a = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ): _a = pa.array([Audio().encode_example(a_ ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("bytes" ) >= 0: _a = storage.field("bytes" ) else: _a = pa.array([None] * len(a_ ) , type=pa.binary() ) if storage.type.get_field_index("path" ) >= 0: _a = storage.field("path" ) else: _a = pa.array([None] * len(a_ ) , type=pa.string() ) _a = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() ) return array_cast(a_ , self.pa_type ) def lowerCamelCase__ ( self , a_ ) ->pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(a_ ): with xopen(a_ , "rb" ) as f: _a = f.read() return bytes_ _a = pa.array( [ (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _a = pa.array( [os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , ) _a = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() ) return array_cast(a_ , self.pa_type )
701
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowercase_ (_UpperCAmelCase ): A__ : str = '''Salesforce/blip-image-captioning-base''' A__ : Optional[int] = ( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) A__ : str = '''image_captioner''' A__ : Tuple = AutoModelForVisionaSeq A__ : Tuple = ['''image'''] A__ : List[Any] = ['''text'''] def __init__( self , *a_ , **a_ ) ->Union[str, Any]: '''simple docstring''' requires_backends(self , ["vision"] ) super().__init__(*a_ , **a_ ) def lowerCamelCase__ ( self , a_ ) ->Union[str, Any]: '''simple docstring''' return self.pre_processor(images=a_ , return_tensors="pt" ) def lowerCamelCase__ ( self , a_ ) ->List[Any]: '''simple docstring''' return self.model.generate(**a_ ) def lowerCamelCase__ ( self , a_ ) ->Tuple: '''simple docstring''' return self.pre_processor.batch_decode(a_ , skip_special_tokens=a_ )[0].strip()
612
0
from scipy.stats import spearmanr import datasets UpperCAmelCase_ : Optional[Any] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n" UpperCAmelCase_ : Tuple = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... 
return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n" UpperCAmelCase_ : List[Any] = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def A__ ( self :Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def A__ ( self :Union[str, Any] , __snake_case :str , __snake_case :Optional[int] , __snake_case :str=False ): '''simple docstring''' __magic_name__ : Any =spearmanr(__snake_case , __snake_case ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
21
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

# Sample inputs used by the demo in the __main__ guard.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single element of the linked list."""

    data: int
    next_node: Node | None


class SortedLinkedList:
    """Singly linked list that keeps its items in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        """Build the list from *ints*.

        Items are inserted at the head in descending order, so iteration
        yields them in ascending order.
        """
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        """Yield the stored values from smallest to largest."""
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        """Return the number of stored values."""
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """Return the values joined by ' -> '."""
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
313
0
"""simple docstring""" import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets A : Optional[Any] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n' A : List[str] = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n' A : Dict = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = 
datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n' def snake_case__ ( _snake_case : Dict , _snake_case : int ): """simple docstring""" return float((preds == labels).mean() ) def snake_case__ ( _snake_case : Optional[int] , _snake_case : List[Any] ): """simple docstring""" UpperCamelCase__ = simple_accuracy(_snake_case , _snake_case ) UpperCamelCase__ = float(fa_score(y_true=_snake_case , y_pred=_snake_case ) ) return { "accuracy": acc, "f1": fa, } def snake_case__ ( _snake_case : Union[str, Any] , _snake_case : Optional[int] ): """simple docstring""" UpperCamelCase__ = np.array(_snake_case ) UpperCamelCase__ = np.array(_snake_case ) UpperCamelCase__ = en_sentvecs.shape[0] # mean centering UpperCamelCase__ = en_sentvecs - np.mean(_snake_case , axis=0 ) UpperCamelCase__ = in_sentvecs - np.mean(_snake_case , axis=0 ) UpperCamelCase__ = cdist(_snake_case , _snake_case , "cosine" ) UpperCamelCase__ = np.array(range(_snake_case ) ) UpperCamelCase__ = sim.argsort(axis=1 )[:, :10] UpperCamelCase__ = np.any(preds == actual[:, None] , axis=1 ) return float(matches.mean() ) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def lowerCamelCase__ ( self :List[str] ) -> Union[str, Any]: """simple docstring""" if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" ) return datasets.MetricInfo( 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), "references": datasets.Value("int64" ) if self.config_name != "cvit-mkb-clsr" else datasets.Sequence(datasets.Value("float32" ) ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , ) def lowerCamelCase__ ( self :List[Any] , lowerCamelCase_ :Any , lowerCamelCase_ :Dict ) -> Optional[int]: """simple docstring""" if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(lowerCamelCase_ , lowerCamelCase_ )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )} else: raise KeyError( "You should supply a configuration name selected in " "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", " "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", " "\"wiki-ner\"]" )
304
"""simple docstring""" from typing import Any class lowerCAmelCase : '''simple docstring''' def __init__( self :Optional[int] , lowerCamelCase_ :Any ) -> Any: """simple docstring""" UpperCamelCase__ = data UpperCamelCase__ = None def __repr__( self :Dict ) -> str: """simple docstring""" return f'Node({self.data})' class lowerCAmelCase : '''simple docstring''' def __init__( self :Tuple ) -> Any: """simple docstring""" UpperCamelCase__ = None def __iter__( self :List[Any] ) -> Any: """simple docstring""" UpperCamelCase__ = self.head while node: yield node.data UpperCamelCase__ = node.next def __len__( self :Union[str, Any] ) -> int: """simple docstring""" return sum(1 for _ in self ) def __repr__( self :List[str] ) -> str: """simple docstring""" return "->".join([str(lowerCamelCase_ ) for item in self] ) def __getitem__( self :str , lowerCamelCase_ :int ) -> Any: """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Any ) -> None: """simple docstring""" if not 0 <= index < len(self ): raise ValueError("list index out of range." 
) UpperCamelCase__ = self.head for _ in range(lowerCamelCase_ ): UpperCamelCase__ = current.next UpperCamelCase__ = data def lowerCamelCase__ ( self :int , lowerCamelCase_ :Any ) -> None: """simple docstring""" self.insert_nth(len(self ) , lowerCamelCase_ ) def lowerCamelCase__ ( self :Any , lowerCamelCase_ :Any ) -> None: """simple docstring""" self.insert_nth(0 , lowerCamelCase_ ) def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :int , lowerCamelCase_ :Any ) -> None: """simple docstring""" if not 0 <= index <= len(self ): raise IndexError("list index out of range" ) UpperCamelCase__ = Node(lowerCamelCase_ ) if self.head is None: UpperCamelCase__ = new_node elif index == 0: UpperCamelCase__ = self.head # link new_node to head UpperCamelCase__ = new_node else: UpperCamelCase__ = self.head for _ in range(index - 1 ): UpperCamelCase__ = temp.next UpperCamelCase__ = temp.next UpperCamelCase__ = new_node def lowerCamelCase__ ( self :int ) -> None: # print every node data """simple docstring""" print(self ) def lowerCamelCase__ ( self :Union[str, Any] ) -> Any: """simple docstring""" return self.delete_nth(0 ) def lowerCamelCase__ ( self :List[str] ) -> Any: # delete from tail """simple docstring""" return self.delete_nth(len(self ) - 1 ) def lowerCamelCase__ ( self :List[str] , lowerCamelCase_ :int = 0 ) -> Any: """simple docstring""" if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError("List index out of range." 
) UpperCamelCase__ = self.head # default first node if index == 0: UpperCamelCase__ = self.head.next else: UpperCamelCase__ = self.head for _ in range(index - 1 ): UpperCamelCase__ = temp.next UpperCamelCase__ = temp.next UpperCamelCase__ = temp.next.next return delete_node.data def lowerCamelCase__ ( self :str ) -> bool: """simple docstring""" return self.head is None def lowerCamelCase__ ( self :Optional[Any] ) -> None: """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = self.head while current: # Store the current node's next node. UpperCamelCase__ = current.next # Make the current node's next point backwards UpperCamelCase__ = prev # Make the previous node be the current node UpperCamelCase__ = current # Make the current node the next node (to progress iteration) UpperCamelCase__ = next_node # Return prev in order to put the head at the end UpperCamelCase__ = prev def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = LinkedList() assert linked_list.is_empty() is True assert str(_snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
for i in range(10 ): assert len(_snake_case ) == i linked_list.insert_nth(_snake_case , i + 1 ) assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(_snake_case ) == 9 assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): UpperCamelCase__ = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(_snake_case ) == "->".join(str(_snake_case ) for i in range(-8 , 1 ) ) def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = [ -9, 1_00, Node(77_34_51_12 ), "dlrow olleH", 7, 55_55, 0, -192.55555, "Hello, world!", 77.9, Node(10 ), None, None, 12.20, ] UpperCamelCase__ = LinkedList() for i in test_input: linked_list.insert_tail(_snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(_snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head UpperCamelCase__ = linked_list.delete_head() assert result == -9 assert ( str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail UpperCamelCase__ = linked_list.delete_tail() assert result == 12.2 assert ( str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list UpperCamelCase__ = linked_list.delete_nth(10 ) assert result is None assert ( str(_snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, 
world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node("Hello again, world!" ) ) assert ( str(_snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(_snake_case ) assert ( str(_snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(_snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def snake_case__ ( ): """simple docstring""" from doctest import testmod testmod() UpperCamelCase__ = LinkedList() linked_list.insert_head(input("Inserting 1st at head " ).strip() ) linked_list.insert_head(input("Inserting 2nd at head " ).strip() ) print("\nPrint list:" ) linked_list.print_list() linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() ) linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() ) print("\nPrint list:" ) linked_list.print_list() print("\nDelete head" ) linked_list.delete_head() print("Delete tail" ) linked_list.delete_tail() print("\nPrint list:" ) linked_list.print_list() print("\nReverse linked list" ) linked_list.reverse() print("\nPrint list:" ) linked_list.print_list() print("\nString representation of linked list:" ) print(_snake_case ) print("\nReading/changing Node data using indexing:" ) print(F'Element at Position 1: {linked_list[1]}' ) UpperCamelCase__ = input("Enter New Value: " ).strip() print("New list:" ) print(_snake_case ) print(F'length of linked_list is : {len(_snake_case )}' ) if __name__ == "__main__": main()
304
1
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return EnvironmentCommand() def __lowerCamelCase ( UpperCamelCase__ ): '''simple docstring''' return EnvironmentCommand(args.accelerate_config_file ) class lowercase ( lowercase_ ): @staticmethod def a ( snake_case ): snake_case_ = parser.add_parser('env' ) download_parser.set_defaults(func=snake_case ) download_parser.add_argument( '--accelerate-config_file' , default=snake_case , help='The accelerate config file to use for the default values in the launching script.' , ) download_parser.set_defaults(func=snake_case ) def __init__( self , snake_case , *snake_case ): snake_case_ = accelerate_config_file def a ( self ): snake_case_ = 'not installed' if is_safetensors_available(): import safetensors snake_case_ = safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors snake_case_ = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' snake_case_ = 'not installed' snake_case_ = snake_case_ = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file snake_case_ = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(snake_case ): snake_case_ = load_config_from_file(self._accelerate_config_file ).to_dict() snake_case_ = ( '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(snake_case , snake_case ) else F'''\t{accelerate_config}''' ) snake_case_ = 'not installed' snake_case_ = 'NA' if is_torch_available(): import torch snake_case_ = torch.__version__ snake_case_ = torch.cuda.is_available() snake_case_ = 'not installed' snake_case_ = 'NA' if is_tf_available(): import tensorflow as tf snake_case_ = tf.__version__ try: # deprecated in v2.1 snake_case_ = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool snake_case_ = bool(tf.config.list_physical_devices('GPU' ) ) snake_case_ = 'not installed' snake_case_ = 'not installed' snake_case_ = 'not installed' snake_case_ = 'NA' if is_flax_available(): import flax import jax import jaxlib snake_case_ = flax.__version__ snake_case_ = jax.__version__ snake_case_ = jaxlib.__version__ snake_case_ = jax.lib.xla_bridge.get_backend().platform snake_case_ = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': F'''{safetensors_version}''', 'Accelerate version': F'''{accelerate_version}''', 'Accelerate config': F'''{accelerate_config_str}''', 'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''', 'Tensorflow version (GPU?)': F'''{tf_version} ({tf_cuda_available})''', 'Flax version (CPU?/GPU?/TPU?)': F'''{flax_version} ({jax_backend})''', 'Jax version': F'''{jax_version}''', 'JaxLib version': F'''{jaxlib_version}''', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(snake_case ) ) 
return info @staticmethod def a ( snake_case ): return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
362
"""BPE tokenizer for BlenderbotSmall (facebook/blenderbot_small-90M).

Restored from a mangled copy: the base class, module constants, hook-method
names and two locals in ``save_vocabulary`` had been replaced by placeholder
identifiers, which made the module unimportable and the class non-functional.
"""

import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class lowercase(PreTrainedTokenizer):
    # NOTE(review): the class name survives from the mangled source (presumably
    # BlenderbotSmallTokenizer upstream); kept to preserve the public interface.
    """Byte-pair-encoding tokenizer for BlenderbotSmall.

    Args:
        vocab_file: Path to the JSON vocabulary (token -> id).
        merges_file: Path to the BPE merges file.
        bos_token/eos_token/unk_token/pad_token: Special-token strings.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line of a merges file is a version header; last split is empty.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}  # token -> BPE string, memoised across calls

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply BlenderbotSmall's punctuation splitting and BPE merges to ``token``."""
        if token in self.cache:
            return self.cache[token]
        # Separate punctuation / apostrophes, collapse runs of whitespace.
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                # Greedily merge the lowest-ranked pair until none remain.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)

            word = "@@ ".join(word)
            word = word[:-4]  # drop the trailing "</w>" marker
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split ``text`` on whitespace and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        # "@@ " marks a non-final BPE piece; removing it re-joins the word.
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into ``save_directory``; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            # fixed: lambda previously referenced undefined `kv`; `index` was never initialised
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
362
1
"""Fine-tune a multiple-choice model (Bert, RoBERTa, XLNet, ...).

Restored from a mangled copy: ``simple_accuracy`` had duplicate parameter
names (a SyntaxError) and read undefined globals, the dataclasses lost their
names/defaults, and ``fp16`` had been corrupted to ``fpaa``.
"""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to their labels (numpy arrays)."""
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer we fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments for the data we train and evaluate on."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    """Parse CLI args, then train and/or evaluate a multiple-choice model."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer. The .from_pretrained methods guarantee
    # that only one local process can concurrently download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator: pad to a multiple of 8 for tensor-core efficiency under fp16.
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)

    return results


def _mp_fn(index):
    # Entry point for xla_spawn (TPUs); the spawn index is unused.
    main()


if __name__ == "__main__":
    main()
529
"""Testing suite for the PyTorch ViTMSN model.

Restored from a mangled copy: the three classes shared one placeholder name
(the test class instantiates the tester by name, which was undefined), every
method was named ``__lowercase`` (so unittest discovered nothing), and two
debug prints were missing their ``f`` prefix.
"""

import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds tiny configs and random inputs for fast ViTMSN forward checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # fixed: these debug prints were missing the f-string prefix
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests specialised for ViTMSN (no input_ids)."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the shared COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
529
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# fixed: the import name had been corrupted to "AutoModelForSeqaSeqLM"
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class __magic_name__(PipelineTool):
    # NOTE(review): class name survives from the mangled source (presumably
    # TextSummarizationTool upstream); kept to preserve the public interface.
    # Base class and the PipelineTool hook attributes/methods are restored —
    # in the mangled copy every attribute and method shared one placeholder name.
    """Agent tool that summarizes an English text with a BART-CNN checkpoint."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        # Truncate so arbitrarily long inputs still fit the model's context.
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        # generate() returns a batch of one sequence; unwrap it.
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
234
"""Convert an original CvT checkpoint into a HuggingFace ``CvtForImageClassification``.

Restored from a mangled copy in which all five functions shared one placeholder
name (so the converter's calls were unresolved), the converted state dict was
never populated, and the argparse variables were undefined. The hundreds of
hand-written ``append`` calls are replaced by loops producing the identical
mapping list, in the identical order.
"""

import argparse
import json
from collections import OrderedDict

import torch


def embeddings(idx):
    """(HF name, original name) pairs for the patch embedding of stage ``idx``."""
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    return [
        (f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"),
        (f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"),
        (f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"),
        (f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"),
    ]


def attention(idx, cnt):
    """(HF name, original name) pairs for transformer block ``cnt`` of stage ``idx``."""
    hf_block = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_block = f"stage{idx}.blocks.{cnt}"
    attention_weights = []

    # Convolutional q/k/v projections: one conv weight plus the five batch-norm
    # parameters/buffers each.
    bn_params = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        hf_proj = f"{hf_block}.attention.attention.convolution_projection_{long_name}.convolution_projection"
        orig_proj = f"{orig_block}.attn.conv_proj_{short_name}"
        attention_weights.append((f"{hf_proj}.convolution.weight", f"{orig_proj}.conv.weight"))
        for param in bn_params:
            attention_weights.append((f"{hf_proj}.normalization.{param}", f"{orig_proj}.bn.{param}"))

    # Linear q/k/v projections.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_block}.attention.attention.projection_{long_name}.{param}",
                    f"{orig_block}.attn.proj_{short_name}.{param}",
                )
            )

    # Attention output projection, MLP and the two layer norms.
    for hf_tail, orig_tail in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_block}.{hf_tail}.{param}", f"{orig_block}.{orig_tail}.{param}"))

    return attention_weights


def cls_token(idx):
    """(HF name, original name) pair for the CLS token of stage ``idx``."""
    # The original checkpoints name the (single) cls token "stage2.cls_token"
    # regardless of idx; this mirrors the upstream conversion script.
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """(HF name, original name) pairs for the final norm and classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Load the original weights, rename them per the mapping above, and save."""
    # Heavy optional dependencies are imported lazily so the pure-string mapping
    # helpers above stay importable without torch-hub/transformers installed.
    from huggingface_hub import cached_download, hf_hub_url

    from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification

    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # assumes the processor resizes by shortest edge — TODO confirm against upstream
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        # fixed: the renamed tensor was previously assigned to a throwaway local,
        # so the state dict handed to load_state_dict was empty
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
295
0
def mf_knapsack(i, wt, val, j):
    """Memoized (top-down) 0/1 knapsack.

    Returns the maximum value achievable using the first ``i`` items with a
    remaining weight budget ``j``.  Results are cached in the module-level
    table ``f`` (which the caller must initialise with ``-1`` for "unknown"
    entries, as done in ``__main__`` below).
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:  # -1 marks a sub-problem that has not been solved yet
        if j < wt[i - 1]:
            # Item i does not fit: the best we can do is without it.
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            # Either skip item i, or take it and pay its weight.
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns ``(optimal_value, dp)`` where ``dp`` is the full
    ``(n + 1) x (w + 1)`` table; callers may use the table to reconstruct an
    optimal item subset (see :func:`_construct_solution`).
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    # Index with `w` (not the loop variable), so a zero-capacity knapsack
    # does not raise NameError when the inner loop never runs.
    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solve the knapsack and also reconstruct one optimal subset of items.

    Returns ``(optimal_value, optimal_subset)`` where the subset holds
    1-based item indices.

    Raises:
        ValueError: if ``wt``/``val`` are not lists/tuples or differ in length.
        TypeError: if any weight is not an integer.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding each item that must belong to an
    optimal solution to ``optimal_set`` (1-based indices)."""
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # Item i is not needed for this optimum.
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            # Item i is part of the optimum: record it and drop its weight.
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
717
# Author: OMKAR PATHAK, Nwachukwu Chidiebere

# Use a Python dictionary to construct the graph.

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class SCREAMING_SNAKE_CASE__(Generic[T]):
    """Graph stored as a dictionary-backed adjacency list.

    Supports both directed (default) and undirected graphs; for undirected
    graphs every edge is mirrored in both endpoints' adjacency lists.
    """

    def __init__(self, directed: bool = True):
        # Maps each vertex to the list of its adjacent vertices.
        self.adj_list: dict = {}  # dictionary of lists
        self.directed = directed

    def snake_case__(self, source_vertex: T, destination_vertex: T):
        """Add an edge, creating missing vertices on the fly.

        Returns ``self`` so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present, append destination to its list and
            # create the destination vertex with the source as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present, append source to its list and
            # create the source vertex with the destination as its first neighbour.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # neither vertex exists yet: create both, each pointing at the other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both vertices are present, only the source's list grows.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # only source present: append destination and create it with no
            # outgoing edges.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # only destination present: create the source pointing at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # neither present: create source -> destination, destination empty.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self):
        return pformat(self.adj_list)
390
0
import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class _lowerCamelCase( enum.Enum ): lowercase_ : int = 0 lowercase_ : str = 1 lowercase_ : Optional[Any] = 2 @add_end_docstrings(_a ) class _lowerCamelCase( _a ): lowercase_ : List[str] = """ In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos> """ def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int: """simple docstring""" super().__init__(*lowerCamelCase, **lowerCamelCase) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. 
_lowercase : int = None if self.model.config.prefix is not None: _lowercase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. _lowercase : str = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. _lowercase , _lowercase , _lowercase : Optional[Any] = self._sanitize_parameters(prefix=lowerCamelCase, **self._forward_params) _lowercase : Dict = {**self._preprocess_params, **preprocess_params} _lowercase : str = {**self._forward_params, **forward_params} def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase, ) -> int: """simple docstring""" _lowercase : List[str] = {} if prefix is not None: _lowercase : str = prefix if prefix: _lowercase : Dict = self.tokenizer( lowerCamelCase, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework) _lowercase : Any = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ' [None, \'hole\']') _lowercase : str = handle_long_generation preprocess_params.update(lowerCamelCase) _lowercase : Optional[int] = generate_kwargs _lowercase : Union[str, Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`') if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`') _lowercase : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else 
ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`') _lowercase : List[str] = ReturnType.TENSORS if return_type is not None: _lowercase : Tuple = return_type if clean_up_tokenization_spaces is not None: _lowercase : Optional[Any] = clean_up_tokenization_spaces if stop_sequence is not None: _lowercase : List[Any] = self.tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase) if len(lowerCamelCase) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.') _lowercase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Tuple: """simple docstring""" if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True}) return super()._parse_and_tokenize(*lowerCamelCase, **lowerCamelCase) def __call__( self, lowerCamelCase, **lowerCamelCase) -> int: """simple docstring""" return super().__call__(lowerCamelCase, **lowerCamelCase) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="", lowerCamelCase=None, **lowerCamelCase) -> Dict: """simple docstring""" _lowercase : List[str] = self.tokenizer( prefix + prompt_text, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework) _lowercase : Optional[int] = prompt_text if handle_long_generation == "hole": _lowercase : Dict = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: _lowercase : Any = generate_kwargs['max_new_tokens'] else: _lowercase : Tuple = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected') if cur_len + new_tokens > 
self.tokenizer.model_max_length: _lowercase : Optional[Any] = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length') _lowercase : Tuple = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: _lowercase : Optional[int] = inputs['attention_mask'][:, -keep_length:] return inputs def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Dict: """simple docstring""" _lowercase : Any = model_inputs['input_ids'] _lowercase : str = model_inputs.get('attention_mask', lowerCamelCase) # Allow empty prompts if input_ids.shape[1] == 0: _lowercase : List[str] = None _lowercase : int = None _lowercase : str = 1 else: _lowercase : Dict = input_ids.shape[0] _lowercase : int = model_inputs.pop('prompt_text') # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
_lowercase : Optional[int] = generate_kwargs.pop('prefix_length', 0) if prefix_length > 0: _lowercase : int = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: _lowercase : Union[str, Any] = generate_kwargs.get('max_length') or self.model.config.max_length generate_kwargs["max_length"] += prefix_length _lowercase : str = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL _lowercase : Dict = self.model.generate(input_ids=lowerCamelCase, attention_mask=lowerCamelCase, **lowerCamelCase) _lowercase : int = generated_sequence.shape[0] if self.framework == "pt": _lowercase : Optional[Any] = generated_sequence.reshape(lowerCamelCase, out_b // in_b, *generated_sequence.shape[1:]) elif self.framework == "tf": _lowercase : Optional[int] = tf.reshape(lowerCamelCase, (in_b, out_b // in_b, *generated_sequence.shape[1:])) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=ReturnType.FULL_TEXT, lowerCamelCase=True) -> List[Any]: """simple docstring""" _lowercase : Tuple = model_outputs['generated_sequence'][0] _lowercase : str = model_outputs['input_ids'] _lowercase : Any = model_outputs['prompt_text'] _lowercase : str = generated_sequence.numpy().tolist() _lowercase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: _lowercase : Dict = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text _lowercase : Union[str, Any] = self.tokenizer.decode( lowerCamelCase, skip_special_tokens=lowerCamelCase, 
clean_up_tokenization_spaces=lowerCamelCase, ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: _lowercase : Union[str, Any] = 0 else: _lowercase : Dict = len( self.tokenizer.decode( input_ids[0], skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, )) if return_type == ReturnType.FULL_TEXT: _lowercase : int = prompt_text + text[prompt_length:] else: _lowercase : List[str] = text[prompt_length:] _lowercase : Dict = {'generated_text': all_text} records.append(lowerCamelCase) return records
89
UpperCamelCase_ = '0.21.0' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
132
0
"""simple docstring""" import sys def lowercase (_lowerCAmelCase ): __lowerCAmelCase = len(_lowerCAmelCase ) __lowerCAmelCase = [[0 for x in range(_lowerCAmelCase )] for x in range(_lowerCAmelCase )] __lowerCAmelCase = [[0 for x in range(_lowerCAmelCase )] for x in range(_lowerCAmelCase )] for chain_length in range(2 , _lowerCAmelCase ): for a in range(1 , n - chain_length + 1 ): __lowerCAmelCase = a + chain_length - 1 __lowerCAmelCase = sys.maxsize for c in range(_lowerCAmelCase , _lowerCAmelCase ): __lowerCAmelCase = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: __lowerCAmelCase = cost __lowerCAmelCase = c return matrix, sol def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): if i == j: print("""A""" + str(_lowerCAmelCase ) , end=""" """ ) else: print("""(""" , end=""" """ ) print_optiomal_solution(_lowerCAmelCase , _lowerCAmelCase , optimal_solution[i][j] ) print_optiomal_solution(_lowerCAmelCase , optimal_solution[i][j] + 1 , _lowerCAmelCase ) print(""")""" , end=""" """ ) def lowercase (): __lowerCAmelCase = [30, 35, 15, 5, 10, 20, 25] __lowerCAmelCase = len(_lowerCAmelCase ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 __lowerCAmelCase , __lowerCAmelCase = matrix_chain_order(_lowerCAmelCase ) print("""No. of Operation required: """ + str(matrix[1][n - 1] ) ) print_optiomal_solution(_lowerCAmelCase , 1 , n - 1 ) if __name__ == "__main__": main()
705
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class lowerCAmelCase_ ( A__ ): '''simple docstring''' def __init__( self , *snake_case_ , **snake_case_ ) -> None: warnings.warn( """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use GLPNImageProcessor instead.""" , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
573
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""", """bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""", """bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""", """bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""", """bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""", """bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""", } class UpperCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' __a : Tuple = """bloom""" __a : Optional[Any] = ["""past_key_values"""] __a : Tuple = { """num_hidden_layers""": """n_layer""", """num_attention_heads""": """n_head""", } def __init__( self, snake_case__=25_08_80, snake_case__=64, snake_case__=2, snake_case__=8, snake_case__=1E-5, snake_case__=0.02, snake_case__=True, snake_case__=1, snake_case__=2, snake_case__=False, snake_case__=0.0, snake_case__=0.0, snake_case__=1, snake_case__=False, **snake_case__, ) -> str: """simple docstring""" lowercase_ : List[str] = vocab_size # Backward compatibility with n_embed kwarg lowercase_ : Any = kwargs.pop("""n_embed""", snake_case__ ) lowercase_ : List[Any] = hidden_size if n_embed is None else n_embed lowercase_ : List[str] = n_layer lowercase_ : List[str] = n_head lowercase_ : Dict = layer_norm_epsilon lowercase_ : Dict = initializer_range lowercase_ : Optional[Any] = use_cache lowercase_ : Tuple = 
pretraining_tp lowercase_ : Dict = apply_residual_connection_post_layernorm lowercase_ : Optional[int] = hidden_dropout lowercase_ : str = attention_dropout lowercase_ : Dict = bos_token_id lowercase_ : Optional[int] = eos_token_id lowercase_ : Dict = slow_but_exact super().__init__(bos_token_id=snake_case__, eos_token_id=snake_case__, **snake_case__ ) class UpperCamelCase__ ( lowerCamelCase__ ): '''simple docstring''' __a : Dict = version.parse("""1.12""" ) def __init__( self, snake_case__, snake_case__ = "default", snake_case__ = None, snake_case__ = False, ) -> Optional[Any]: """simple docstring""" super().__init__(snake_case__, task=snake_case__, patching_specs=snake_case__, use_past=snake_case__ ) if not getattr(self._config, """pad_token_id""", snake_case__ ): # TODO: how to do that better? lowercase_ : Dict = 0 @property def snake_case__ ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase_ : Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(snake_case__, direction="""inputs""", inverted_values_shape=snake_case__ ) lowercase_ : int = {0: """batch""", 1: """past_sequence + sequence"""} else: lowercase_ : Dict = {0: """batch""", 1: """sequence"""} return common_inputs @property def snake_case__ ( self ) -> int: """simple docstring""" return self._config.n_layer @property def snake_case__ ( self ) -> int: """simple docstring""" return self._config.n_head @property def snake_case__ ( self ) -> float: """simple docstring""" return 1E-3 def snake_case__ ( self, snake_case__, snake_case__ = -1, snake_case__ = -1, snake_case__ = False, snake_case__ = None, ) -> Mapping[str, Any]: """simple docstring""" lowercase_ : str = super(snake_case__, self ).generate_dummy_inputs( snake_case__, batch_size=snake_case__, seq_length=snake_case__, is_pair=snake_case__, framework=snake_case__ ) # We need to order the input in the way they appears in the forward() lowercase_ : Union[str, Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch lowercase_ , lowercase_ : Dict = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values lowercase_ : List[str] = seqlen + 2 lowercase_ : Dict = self._config.hidden_size // self.num_attention_heads lowercase_ : Tuple = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) lowercase_ : List[Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) lowercase_ : List[str] = [ (torch.zeros(snake_case__ ), torch.zeros(snake_case__ )) for _ in range(self.num_layers ) ] lowercase_ : Optional[Any] = common_inputs["""attention_mask"""] if self.use_past: lowercase_ : int = ordered_inputs["""attention_mask"""].dtype lowercase_ : 
Union[str, Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(snake_case__, snake_case__, dtype=snake_case__ )], dim=1 ) return ordered_inputs @property def snake_case__ ( self ) -> int: """simple docstring""" return 13
458
import tensorflow as tf from ...tf_utils import shape_list class UpperCamelCase__ ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__=1, snake_case__=False, **snake_case__ ) -> Optional[int]: """simple docstring""" super().__init__(**snake_case__ ) lowercase_ : Tuple = vocab_size lowercase_ : Union[str, Any] = d_embed lowercase_ : Optional[Any] = d_proj lowercase_ : Optional[Any] = cutoffs + [vocab_size] lowercase_ : Optional[int] = [0] + self.cutoffs lowercase_ : Union[str, Any] = div_val lowercase_ : Union[str, Any] = self.cutoffs[0] lowercase_ : List[str] = len(self.cutoffs ) - 1 lowercase_ : List[str] = self.shortlist_size + self.n_clusters lowercase_ : Tuple = keep_order lowercase_ : Dict = [] lowercase_ : int = [] def snake_case__ ( self, snake_case__ ) -> Optional[int]: """simple docstring""" if self.n_clusters > 0: lowercase_ : List[str] = self.add_weight( shape=(self.n_clusters, self.d_embed), initializer="""zeros""", trainable=snake_case__, name="""cluster_weight""" ) lowercase_ : List[Any] = self.add_weight( shape=(self.n_clusters,), initializer="""zeros""", trainable=snake_case__, name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: lowercase_ : Any = self.add_weight( shape=(self.d_embed, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""", ) self.out_projs.append(snake_case__ ) else: self.out_projs.append(snake_case__ ) lowercase_ : List[str] = self.add_weight( shape=(self.vocab_size, self.d_embed), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", ) lowercase_ : List[str] = self.add_weight( shape=(self.vocab_size,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): lowercase_ , lowercase_ : Optional[Any] = 
self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ : Union[str, Any] = self.d_embed // (self.div_val**i) lowercase_ : int = self.add_weight( shape=(d_emb_i, self.d_proj), initializer="""zeros""", trainable=snake_case__, name=f"""out_projs_._{i}""" ) self.out_projs.append(snake_case__ ) lowercase_ : Any = self.add_weight( shape=(r_idx - l_idx, d_emb_i), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._weight""", ) lowercase_ : Tuple = self.add_weight( shape=(r_idx - l_idx,), initializer="""zeros""", trainable=snake_case__, name=f"""out_layers_._{i}_._bias""", ) self.out_layers.append((weight, bias) ) super().build(snake_case__ ) @staticmethod def snake_case__ ( snake_case__, snake_case__, snake_case__, snake_case__=None ) -> Optional[Any]: """simple docstring""" lowercase_ : Dict = x if proj is not None: lowercase_ : List[Any] = tf.einsum("""ibd,ed->ibe""", snake_case__, snake_case__ ) return tf.einsum("""ibd,nd->ibn""", snake_case__, snake_case__ ) + b @staticmethod def snake_case__ ( snake_case__, snake_case__ ) -> List[str]: """simple docstring""" lowercase_ : Optional[int] = shape_list(snake_case__ ) lowercase_ : Optional[Any] = tf.range(lp_size[0], dtype=target.dtype ) lowercase_ : str = tf.stack([r, target], 1 ) return tf.gather_nd(snake_case__, snake_case__ ) def snake_case__ ( self, snake_case__, snake_case__, snake_case__=True, snake_case__=False ) -> Dict: """simple docstring""" lowercase_ : Any = 0 if self.n_clusters == 0: lowercase_ : Union[str, Any] = self._logit(snake_case__, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0] ) if target is not None: lowercase_ : str = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=snake_case__, logits=snake_case__ ) lowercase_ : Optional[int] = tf.nn.log_softmax(snake_case__, axis=-1 ) else: lowercase_ : int = shape_list(snake_case__ ) lowercase_ : List[Any] = [] lowercase_ : Tuple = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): lowercase_ , 
lowercase_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: lowercase_ : Tuple = (target >= l_idx) & (target < r_idx) lowercase_ : Union[str, Any] = tf.where(snake_case__ ) lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ ) - l_idx if self.div_val == 1: lowercase_ : Optional[Any] = self.out_layers[0][0][l_idx:r_idx] lowercase_ : Union[str, Any] = self.out_layers[0][1][l_idx:r_idx] else: lowercase_ : Dict = self.out_layers[i][0] lowercase_ : int = self.out_layers[i][1] if i == 0: lowercase_ : Optional[int] = tf.concat([cur_W, self.cluster_weight], 0 ) lowercase_ : Optional[Any] = tf.concat([cur_b, self.cluster_bias], 0 ) lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[0] ) lowercase_ : List[str] = tf.nn.log_softmax(snake_case__ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: lowercase_ : int = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Optional[int] = self._gather_logprob(snake_case__, snake_case__ ) else: lowercase_ : List[str] = self._logit(snake_case__, snake_case__, snake_case__, self.out_projs[i] ) lowercase_ : Dict = tf.nn.log_softmax(snake_case__ ) lowercase_ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster lowercase_ : int = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(snake_case__ ) if target is not None: lowercase_ : Optional[Any] = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Optional[int] = tf.boolean_mask(snake_case__, snake_case__ ) lowercase_ : Union[str, Any] = self._gather_logprob(snake_case__, snake_case__ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(snake_case__, -cur_logprob, shape_list(snake_case__ ) ) lowercase_ : List[str] = tf.concat(snake_case__, axis=-1 ) if target is not None: if return_mean: lowercase_ : Tuple = tf.reduce_mean(snake_case__ ) # Add the 
training-time loss value to the layer using `self.add_loss()`. self.add_loss(snake_case__ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(snake_case__, name=self.name, aggregation="""mean""" if return_mean else """""" ) return out
458
1
"""Newton-Raphson root finding with symbolic differentiation (SymPy).

Supports roots of multiplicity > 1 via the modified update
``x_{n+1} = x_n - m * f(x_n) / f'(x_n)``.
"""
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` near ``starting_point``.

    Args:
        function: expression whose root is sought, as a SymPy-parsable
            string, e.g. ``"sin(x)"``.
        starting_point: initial guess; may be complex.
        variable: name of the free symbol appearing in ``function``.
        precision: iteration stops once two consecutive guesses differ by
            less than this.
        multiplicity: known multiplicity of the root (1 for simple roots);
            using the true multiplicity restores quadratic convergence.

    Returns:
        The converged root estimate.

    Raises:
        ZeroDivisionError: if the derivative vanishes at some guess.
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses.
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess


# Backward-compatible alias for the previous (mangled) public name.
snake_case = newton_raphson


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}''')
    # Find value of e
    print(
        'The root of log(y) - 1 = 0 is ',
        f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
    )
    # Exponential Roots
    print(
        'The root of exp(x) - 1 = 0 is',
        f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
    )
    # Find root of cos(x)
    print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
110
from typing import Optional

import numpy as np
import torch
from torch import nn
# NOTE(review): the dump imported ``GPTaConfig``/``GPTaLMHeadModel``, which do
# not exist in transformers; the intended symbols are clearly the GPT-2 ones.
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UpperCamelCase(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """GPT-2 based text decoder that conditions generation on a prefix embedding.

    The prefix (e.g. a CLIP feature) is optionally projected through an
    encode/decode bottleneck, prepended to the token embeddings, and fed to a
    ``GPT2LMHeadModel``.  Reconstructed from a name-mangled dump: duplicate
    parameter names and the uniform method name ``A`` were restored from the
    names the bodies themselves reference (``self.generate_beam``,
    ``self.get_dummy_token``, ...).
    """

    # Checkpoint keys that are expected to be absent/extra when loading.
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck around the prefix; identity when no hidden dim is set.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        """Run the decoder on token ids prefixed by the (projected) prefix embedding.

        Returns ``(out, hidden)`` when a prefix bottleneck is configured,
        otherwise just the transformer output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            # Prepend dummy label positions for the prefix tokens.
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        """Zero labels covering the prefix positions (one row per batch element)."""
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        """Project a prefix through the encode bottleneck (identity if unset)."""
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        """Beam-search one caption per feature row; returns (tokens, lengths)."""
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        """Beam search over the decoder.

        Returns the ``beam_size`` candidate token sequences sorted by
        length-normalized score (best first) together with their lengths.
        """
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed the beams from the top-k next tokens.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # NOTE(review): the mangled dump lost the assignment targets of
                # the two lines below; reconstructed as masking finished beams
                # so they only extend with a zero-score placeholder token.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(
                generated.shape[0], 1, -1
            )
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
110
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="lxmert" UpperCamelCase ={} def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=95_00 , UpperCamelCase_=16_00 , UpperCamelCase_=4_00 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=9 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=6.6_7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]: __lowercase : Optional[Any] = vocab_size __lowercase : Any = hidden_size __lowercase : Any = num_attention_heads __lowercase : List[Any] = hidden_act __lowercase : Optional[int] = intermediate_size __lowercase : str = hidden_dropout_prob __lowercase : Union[str, Any] = attention_probs_dropout_prob __lowercase : Tuple = max_position_embeddings __lowercase : Optional[Any] = type_vocab_size __lowercase : Optional[int] = initializer_range __lowercase : List[str] = layer_norm_eps __lowercase : str = num_qa_labels __lowercase : Optional[Any] = num_object_labels __lowercase : Dict = num_attr_labels __lowercase : Dict = l_layers __lowercase : str = x_layers __lowercase : List[Any] = r_layers __lowercase : List[Any] = visual_feat_dim __lowercase : Tuple = visual_pos_dim __lowercase : Union[str, Any] = visual_loss_normalizer __lowercase : str = task_matched __lowercase : Any = task_mask_lm __lowercase : List[str] = task_obj_predict __lowercase : Tuple = task_qa __lowercase : int = 
visual_obj_loss __lowercase : int = visual_attr_loss __lowercase : Optional[Any] = visual_feat_loss __lowercase : Union[str, Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**UpperCamelCase_ )
76
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
# NOTE(review): the dump imported ``TaForConditionalGeneration``, which does
# not exist in transformers; the intended symbol is the T5 head model.
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
    ROUGE_KEYS,
    LegacySeqaSeqDataset,
    SeqaSeqDataset,
    assert_all_frozen,
    calculate_bleu,
    calculate_rouge,
    check_output_dir,
    flatten_list,
    freeze_embeds,
    freeze_params,
    get_git_info,
    label_smoothed_nll_loss,
    lmap,
    pickle_save,
    save_git_info,
    save_json,
    use_task_specific_params,
)


# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train  # noqa


logger = logging.getLogger(__name__)


class SummarizationModule(BaseTransformer):
    """Lightning module fine-tuning a seq2seq model for summarization.

    Reconstructed from a name-mangled dump: every method had duplicated
    parameter names (a SyntaxError) and the two classes plus ``main`` were
    stripped of the names the rest of the file calls them by
    (``SummarizationModule``/``TranslationModule``/``main``).  Identifiers
    were restored from the references in the bodies themselves.
    """

    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            # presumably disables Lightning's DDP sampler replacement — TODO confirm
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training')
            if hparams.sortish_sampler:
                raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously')

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, 'summarization')
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / 'metrics.json'
        self.hparams_save_path = Path(self.output_dir) / 'hparams.pkl'
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        # FSMT keeps separate source/target vocabularies.
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            'train': self.hparams.n_train,
            'val': self.hparams.n_val,
            'test': self.hparams.n_test,
        }
        # Negative counts mean "use the whole split".
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            'train': self.hparams.max_target_length,
            'val': self.hparams.val_max_target_length,
            'test': self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()['repo_sha']
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, 'prepare_seq2seq_batch') else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric

    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """Dump one batch in readable and tokenized form for debugging."""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if 'mask' not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / 'text_batch.json')
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / 'tok_batch.json')
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        """Decode token ids to stripped strings."""
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        """Compute the (possibly label-smoothed) LM loss for one batch."""
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch['input_ids'], batch['attention_mask']
        tgt_ids = batch['labels']
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:
            # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs['logits']
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)

    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch['input_ids'].ne(self.pad).sum() + batch['labels'].ne(self.pad).sum()
        logs["bs"] = batch['input_ids'].shape[0]
        logs["src_pad_tok"] = batch['input_ids'].eq(self.pad).sum()
        logs["src_pad_frac"] = batch['input_ids'].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)

    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        """Aggregate per-step losses/metrics for one validation (or test) epoch."""
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses['loss']
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['gen_time', 'gen_len']
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x['preds'] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f'''{prefix}_loss''': loss,
            f'''{prefix}_{self.val_metric}''': metric_tensor,
        }

    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        """Generate, decode, and score one evaluation batch."""
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch['input_ids'],
            attention_mask=batch['attention_mask'],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch['input_ids'].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch['labels'])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix='test')

    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader('train', batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader('val', batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader('test', batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            '--max_source_length',
            default=1024,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--max_target_length',
            default=56,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--val_max_target_length',
            default=142,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument(
            '--test_max_target_length',
            default=142,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument('--freeze_encoder', action='store_true')
        parser.add_argument('--freeze_embeds', action='store_true')
        parser.add_argument('--sortish_sampler', action='store_true', default=False)
        parser.add_argument('--overwrite_output_dir', action='store_true', default=False)
        parser.add_argument('--max_tokens_per_batch', type=int, default=None)
        parser.add_argument('--logger_name', type=str, choices=['default', 'wandb', 'wandb_shared'], default='default')
        parser.add_argument('--n_train', type=int, default=-1, required=False, help='# examples. -1 means use all.')
        parser.add_argument('--n_val', type=int, default=500, required=False, help='# examples. -1 means use all.')
        parser.add_argument('--n_test', type=int, default=-1, required=False, help='# examples. -1 means use all.')
        parser.add_argument(
            '--task', type=str, default='summarization', required=False, help='# examples. -1 means use all.')
        parser.add_argument('--label_smoothing', type=float, default=0.0, required=False)
        parser.add_argument('--src_lang', type=str, default='', required=False)
        parser.add_argument('--tgt_lang', type=str, default='', required=False)
        parser.add_argument('--eval_beams', type=int, default=None, required=False)
        parser.add_argument(
            '--val_metric', type=str, default=None, required=False, choices=['bleu', 'rouge2', 'loss', None])
        parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        parser.add_argument('--save_top_k', type=int, default=1, required=False, help='How many checkpoints to save')
        parser.add_argument(
            '--early_stopping_patience',
            type=int,
            default=-1,
            required=False,
            help=(
                '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
                ' val_check_interval will effect it.'
            ),
        )
        return parser


class TranslationModule(SummarizationModule):
    """Same training loop as SummarizationModule, scored with BLEU."""

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)


def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) a module, train it, and optionally run the test loop."""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('/tmp')
        or str(args.output_dir).startswith('/var')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get('WANDB_PROJECT', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f'''hf_{dataset}''')

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == 'loss'
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / 'hparams.pkl')
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '*.ckpt'), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model


# Backward-compatible aliases for the previous (mangled) public names.
SCREAMING_SNAKE_CASE_ = TranslationModule
_snake_case = main


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    main(args)
181
0
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ): __a : List[str] = parent __a : int = batch_size __a : List[Any] = seq_length __a : Optional[int] = is_training __a : int = use_attention_mask __a : int = use_token_type_ids __a : int = use_labels __a : Optional[Any] = vocab_size __a : Optional[int] = hidden_size __a : Tuple = num_hidden_layers __a : Dict = num_attention_heads __a : List[str] = intermediate_size __a : int = hidden_act __a : str = hidden_dropout_prob __a : Optional[Any] = attention_probs_dropout_prob __a : Optional[int] = max_position_embeddings __a : int = type_vocab_size __a : int = type_sequence_label_size __a : Optional[int] = initializer_range __a : str = num_choices def _lowerCamelCase ( self ): __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Dict = None if self.use_attention_mask: __a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __a : Dict = 
None if self.use_token_type_ids: __a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __a : Optional[Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowerCamelCase ( self ): __a : int = self.prepare_config_and_inputs() __a , __a , __a , __a : List[Any] = config_and_inputs __a : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowercase ( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _lowerCamelCase ( self ): __a : Tuple = FlaxAlbertModelTester(self ) @slow def _lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __a : Optional[int] = model_class_name.from_pretrained('''albert-base-v2''' ) __a : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCamelCase ( self ): __a : Dict = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) __a : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __a : Optional[int] = np.array([[0, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1]] ) __a : str = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __a : Tuple = (1, 11, 768) self.assertEqual(output.shape , _UpperCAmelCase ) __a : Dict = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
101
"""simple docstring""" from __future__ import annotations import typing from collections import Counter def __A ( a_ :int) -> typing.Counter[int]: __a : typing.Counter[int] = Counter() for base in range(1 , max_perimeter + 1): for perpendicular in range(a_ , max_perimeter + 1): __a : Any = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(a_): __a : List[Any] = int(base + perpendicular + hypotenuse) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def __A ( a_ :int = 10_00) -> int: __a : Dict = pythagorean_triple(a_) return triplets.most_common(1)[0][0] if __name__ == "__main__": print(F'Perimeter {solution()} has maximum solutions')
101
1
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : str=32 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : str=10 , lowerCAmelCase__ : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase__ : Optional[Any]=[1, 1, 2, 1] , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : int="relu" , lowerCAmelCase__ : str=3 , lowerCAmelCase__ : Dict=None , ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = image_size _UpperCamelCase = num_channels _UpperCamelCase = embeddings_size _UpperCamelCase = hidden_sizes _UpperCamelCase = depths _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_act _UpperCamelCase = num_labels _UpperCamelCase = scope _UpperCamelCase = len(lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> Dict: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCamelCase = self.get_config() return config, pixel_values def snake_case__ ( self : str ) -> Any: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , 
depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def snake_case__ ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = FlaxRegNetModel(config=lowerCAmelCase__ ) _UpperCamelCase = model(lowerCAmelCase__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def snake_case__ ( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase__ ) _UpperCamelCase = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : int ) -> Any: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase = config_and_inputs _UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () _snake_case : Any = False _snake_case : Dict = False _snake_case : Optional[int] = False def snake_case__ ( self : str ) -> None: '''simple docstring''' _UpperCamelCase = FlaxRegNetModelTester(self ) _UpperCamelCase = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def snake_case__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() 
self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case__ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' return def snake_case__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def snake_case__ ( self : Any ) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def snake_case__ ( self : Dict ) -> Dict: '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def snake_case__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' pass def snake_case__ ( self : Dict ) -> Optional[int]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(lowerCAmelCase__ ) _UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def snake_case__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] ): _UpperCamelCase = model_class(lowerCAmelCase__ ) _UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) _UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase = self.model_tester.num_stages 
self.assertEqual(len(lowerCAmelCase__ ) , expected_num_stages + 1 ) _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def snake_case__ ( self : Dict ) -> Optional[int]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = model_class(lowerCAmelCase__ ) @jax.jit def model_jitted(lowerCAmelCase__ : List[str] , **lowerCAmelCase__ : Optional[int] ): return model(pixel_values=lowerCAmelCase__ , **lowerCAmelCase__ ) with self.subTest('''JIT Enabled''' ): _UpperCamelCase = model_jitted(**lowerCAmelCase__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): _UpperCamelCase = model_jitted(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case__ ( self : Tuple ) -> Dict: '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def snake_case__ ( self : Tuple ) -> Any: '''simple docstring''' _UpperCamelCase = 
FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='''np''' ) _UpperCamelCase = model(**lowerCAmelCase__ ) # verify the logits _UpperCamelCase = (1, 1000) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) _UpperCamelCase = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
98
'''simple docstring''' import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" _snake_case : int = OpenAIGPTTokenizer _snake_case : Tuple = OpenAIGPTTokenizerFast _snake_case : Union[str, Any] = True _snake_case : Optional[int] = False def snake_case__ ( self : Dict ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCamelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _UpperCamelCase = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) _UpperCamelCase = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', ''''''] _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(lowerCAmelCase__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(lowerCAmelCase__ ) ) def snake_case__ ( self : Tuple , lowerCAmelCase__ : List[str] ) -> Optional[Any]: '''simple docstring''' return "lower newer", "lower newer" def snake_case__ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) _UpperCamelCase = '''lower''' _UpperCamelCase = ['''low''', '''er</w>'''] _UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ ) 
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCamelCase = tokens + ['''<unk>'''] _UpperCamelCase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ ) def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tuple=15 ) -> Union[str, Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _UpperCamelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ ) # Simple input _UpperCamelCase = '''This is a simple input''' _UpperCamelCase = ['''This is a simple input 1''', '''This is a simple input 2'''] _UpperCamelCase = ('''This is a simple input''', '''This is a pair''') _UpperCamelCase = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Simple input self.assertRaises( lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , ) # Pair input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises(lowerCAmelCase__ , tokenizer_r.encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' ) # Pair input self.assertRaises( lowerCAmelCase__ , tokenizer_r.batch_encode_plus , lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' , ) def snake_case__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' pass @require_ftfy @require_spacy 
@require_tokenizers class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" pass
98
1
class OverFlowError(Exception):
    """Raised when a queue already holds its maximum of 100 items."""


class UnderFlowError(Exception):
    """Raised when dequeueing from an empty queue."""


class FixedPriorityQueue:
    """Priority queue with three fixed priority levels (0 is highest).

    Each level is served FIFO and holds at most 100 items.
    """

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        """Append *data* to the queue for *priority* (must be 0, 1 or 2).

        Raises OverFlowError when that level is full and ValueError for an
        invalid priority.
        """
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        """Pop the oldest item from the highest-priority non-empty level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Priority queue in which the smallest stored value has highest priority."""

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        """Store *data*; at most 100 items may be held."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        """Remove and return the smallest stored value."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue() -> None:
    """Demo of FixedPriorityQueue; the final dequeue intentionally raises
    UnderFlowError once the queue is exhausted."""
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())  # 10th dequeue of 9 items -> UnderFlowError (demo)


def element_priority_queue() -> None:
    """Demo of ElementPriorityQueue; the final dequeue intentionally raises
    UnderFlowError once the queue is exhausted."""
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())  # 10th dequeue of 9 items -> UnderFlowError (demo)


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
259
# Standard gravity in m/s^2; named `g` because it is the default for the
# `gravity` parameter below (the original constant name left `g` undefined).
g = 9.80_665


def A_(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant (Archimedes) force on a submerged object.

    Force = fluid density * gravity * displaced volume (in newtons).

    Raises:
        ValueError: if fluid_density <= 0, volume < 0, or gravity <= 0.
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
259
1
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel snake_case__ : Optional[int] = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _A ( unittest.TestCase ): '''simple docstring''' @classmethod def _snake_case ( cls : List[Any] ): '''simple docstring''' __lowercase = TOKEN HfFolder.save_token(lowerCamelCase ) @classmethod def _snake_case ( cls : Union[str, Any] ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-model-flax" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" ) except HTTPError: pass def _snake_case ( self : Union[str, Any] ): '''simple docstring''' __lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __lowercase = FlaxBertModel(lowerCamelCase ) model.push_to_hub("test-model-flax" , use_auth_token=self._token ) __lowercase = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) __lowercase = flatten_dict(unfreeze(model.params ) ) __lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="test-model-flax" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(lowerCamelCase , repo_id="test-model-flax" , push_to_hub=lowerCamelCase , use_auth_token=self._token ) __lowercase = 
FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) __lowercase = flatten_dict(unfreeze(model.params ) ) __lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" ) def _snake_case ( self : Any ): '''simple docstring''' __lowercase = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) __lowercase = FlaxBertModel(lowerCamelCase ) model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token ) __lowercase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) __lowercase = flatten_dict(unfreeze(model.params ) ) __lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( lowerCamelCase , repo_id="valid_org/test-model-flax-org" , push_to_hub=lowerCamelCase , use_auth_token=self._token ) __lowercase = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) __lowercase = flatten_dict(unfreeze(model.params ) ) __lowercase = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): __lowercase = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(lowerCamelCase , 1e-3 , msg=f"""{key} not identical""" ) def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowercase = True __lowercase = flatten_dict(modela.params ) __lowercase = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: __lowercase = False return 
models_are_equal @require_flax class _A ( unittest.TestCase ): '''simple docstring''' def _snake_case ( self : Dict ): '''simple docstring''' __lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) __lowercase = FlaxBertModel(lowerCamelCase ) __lowercase = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowerCamelCase , lowerCamelCase ) ) with self.assertRaises(lowerCamelCase ): __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase ) __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase ) self.assertTrue(check_models_equal(lowerCamelCase , lowerCamelCase ) ) def _snake_case ( self : int ): '''simple docstring''' __lowercase = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) __lowercase = FlaxBertModel(lowerCamelCase ) __lowercase = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(lowerCamelCase , lowerCamelCase ) , max_shard_size="10KB" ) with self.assertRaises(lowerCamelCase ): __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase ) __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase ) self.assertTrue(check_models_equal(lowerCamelCase , lowerCamelCase ) ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = "bert" __lowercase = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(lowerCamelCase ): __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase ) __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def _snake_case ( self : Optional[int] ): '''simple docstring''' __lowercase = "bert" __lowercase = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(lowerCamelCase ): __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase ) __lowercase = FlaxBertModel.from_pretrained(lowerCamelCase , 
subfolder=lowerCamelCase ) self.assertIsNotNone(lowerCamelCase )
402
from math import factorial, radians


def snake_case_(
    angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10
) -> float:
    """Approximate sin(angle_in_degrees) with a truncated Taylor series.

    Uses 1 + `accuracy` alternating terms x - x^3/3! + x^5/5! - ... and
    rounds the result to `rounded_values_count` decimal places.
    """
    # Wrap the angle into [0, 360); `//` floors, so negatives work too.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
402
1
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = int(lowerCamelCase_ ) if decimal in (0, 1): # Exit cases for the recursion return str(lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = divmod(lowerCamelCase_ , 2 ) return binary_recursive(lowerCamelCase_ ) + str(lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = str(lowerCamelCase_ ).strip() if not number: raise ValueError("""No input value was provided""" ) SCREAMING_SNAKE_CASE : List[Any] = """-""" if number.startswith("""-""" ) else """""" SCREAMING_SNAKE_CASE : Dict = number.lstrip("""-""" ) if not number.isnumeric(): raise ValueError("""Input value is not an integer""" ) return f'''{negative}0b{binary_recursive(int(lowerCamelCase_ ) )}''' if __name__ == "__main__": from doctest import testmod testmod()
703
'''simple docstring'''

from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed, closed set of languages.

    Each example holds exactly one string translation per language code.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: a struct with one string field per (sorted) language.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dict of string Values."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with variable languages per example.

    An example may cover any subset of `languages` and may provide several
    translations for a single language.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate one example and normalize it to parallel tuples.

        Returns a dict with `language` and `translation` tuples sorted by
        language code (then by translation text for repeated languages).
        """
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten into sequences of language codes and translation strings."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
79
0
import tempfile
import unittest

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class UpperCAmelCase(unittest.TestCase):
    """Integration tests for the BetterTransformer conversion round-trip."""

    def test_conversion(self):
        """Converting, reverting, saving and reloading must preserve outputs."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        # After conversion, BetterTransformer modules must be present.
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        # After reverting, no BetterTransformer modules may remain.
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            # Generation must be unchanged by the save/reload round-trip.
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_save_pretrained_while_converted(self):
        """Saving a converted model must fail until it is reverted."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # NOTE(review): upstream raises ValueError here — confirm the
            # exact exception type against the installed transformers version.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
558
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for the Dinat (Dilated Neighborhood Attention
    Transformer) model; also usable as a backbone config."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
558
1
'''Build a DIRECTORY.md style index of the project's source files.'''

import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield relative paths of every .py/.ipynb file under *top_dir*.

    Skips the 'scripts' directory, hidden/private directories (names
    starting with '.' or '_') and __init__.py files.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into skipped dirs.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    """Markdown list prefix for nesting depth *i* (depth 0 is a heading)."""
    return f"{i * ' '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    """Print a heading/bullet for each path component of *new_path* that
    differs from *old_path*, then return *new_path*."""
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown index (links grouped by directory) of all files."""
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
711
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ : int =logging.get_logger(__name__) A_ : int =torch.device('''cpu''') def snake_case_ ( ) -> List[str]: lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase_ = Image.open(requests.get(__snake_case , stream=__snake_case).raw) return im def snake_case_ ( __snake_case : List[str]) -> Tuple: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1]) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1]) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2]) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2]) def snake_case_ ( __snake_case : List[str] , __snake_case : str , __snake_case : List[str]) -> Union[str, Any]: lowerCAmelCase_ = dct.pop(__snake_case) lowerCAmelCase_ = val def snake_case_ ( __snake_case : Union[str, Any]) -> Any: lowerCAmelCase_ = [] for k in state_dict.keys(): lowerCAmelCase_ = k if ".pwconv" in k: lowerCAmelCase_ = k_new.replace('''.pwconv''' , '''.point_wise_conv''') if ".dwconv" in k: lowerCAmelCase_ = k_new.replace('''.dwconv''' , '''.depth_wise_conv''') if ".Proj." 
in k: lowerCAmelCase_ = k_new.replace('''.Proj.''' , '''.proj.''') if "patch_embed" in k_new: lowerCAmelCase_ = k_new.replace('''patch_embed''' , '''swiftformer.patch_embed.patch_embedding''') if "network" in k_new: lowerCAmelCase_ = k_new.split('''.''') if ls[2].isdigit(): lowerCAmelCase_ = '''swiftformer.encoder.network.''' + ls[1] + '''.blocks.''' + ls[2] + '''.''' + '''.'''.join(ls[3:]) else: lowerCAmelCase_ = k_new.replace('''network''' , '''swiftformer.encoder.network''') rename_keys.append((k, k_new)) return rename_keys @torch.no_grad() def snake_case_ ( __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[str]) -> Tuple: lowerCAmelCase_ = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size lowerCAmelCase_ = 1000 lowerCAmelCase_ = '''huggingface/label-files''' lowerCAmelCase_ = '''imagenet-1k-id2label.json''' lowerCAmelCase_ = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''') , '''r''')) lowerCAmelCase_ = {int(__snake_case): v for k, v in idalabel.items()} lowerCAmelCase_ = idalabel lowerCAmelCase_ = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": lowerCAmelCase_ = [3, 3, 6, 4] lowerCAmelCase_ = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": lowerCAmelCase_ = [3, 3, 9, 6] lowerCAmelCase_ = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": lowerCAmelCase_ = [4, 3, 10, 5] lowerCAmelCase_ = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": lowerCAmelCase_ = [4, 4, 12, 6] lowerCAmelCase_ = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('''https'''): lowerCAmelCase_ = torch.hub.load_state_dict_from_url(__snake_case , map_location='''cpu''' , check_hash=__snake_case) else: lowerCAmelCase_ = torch.load(__snake_case , map_location='''cpu''') lowerCAmelCase_ = checkpoint 
lowerCAmelCase_ = create_rename_keys(__snake_case) for rename_key_src, rename_key_dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case) # load HuggingFace model lowerCAmelCase_ = SwiftFormerForImageClassification(__snake_case).eval() hf_model.load_state_dict(__snake_case) # prepare test inputs lowerCAmelCase_ = prepare_img() lowerCAmelCase_ = ViTImageProcessor.from_pretrained('''preprocessor_config''') lowerCAmelCase_ = processor(images=__snake_case , return_tensors='''pt''') # compare outputs from both models lowerCAmelCase_ = get_expected_output(__snake_case) lowerCAmelCase_ = hf_model(inputs['''pixel_values''']).logits assert hf_logits.shape == torch.Size([1, 1000]) assert torch.allclose(hf_logits[0, 0:5] , __snake_case , atol=1E-3) Path(__snake_case).mkdir(exist_ok=__snake_case) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''') hf_model.save_pretrained(__snake_case) if __name__ == "__main__": A_ : Optional[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') A_ : Dict =parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
606
0
"""simple docstring""" from __future__ import annotations from collections import deque class __lowerCAmelCase : '''simple docstring''' def __init__( self , a ): """simple docstring""" snake_case_ :Optional[int] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) for keyword in keywords: self.add_keyword(_lowerCamelCase ) self.set_fail_transitions() def _a ( self , a , a ): """simple docstring""" for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def _a ( self , a ): """simple docstring""" snake_case_ :Union[str, Any] = 0 for character in keyword: snake_case_ :str = self.find_next_state(_lowerCamelCase , _lowerCamelCase ) if next_state is None: self.adlist.append( { "value": character, "next_states": [], "fail_state": 0, "output": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) snake_case_ :str = len(self.adlist ) - 1 else: snake_case_ :str = next_state self.adlist[current_state]["output"].append(_lowerCamelCase ) def _a ( self ): """simple docstring""" snake_case_ :List[Any] = deque() for node in self.adlist[0]["next_states"]: q.append(_lowerCamelCase ) snake_case_ :str = 0 while q: snake_case_ :Tuple = q.popleft() for child in self.adlist[r]["next_states"]: q.append(_lowerCamelCase ) snake_case_ :Optional[Any] = self.adlist[r]["fail_state"] while ( self.find_next_state(_lowerCamelCase , self.adlist[child]["value"] ) is None and state != 0 ): snake_case_ :Optional[int] = self.adlist[state]["fail_state"] snake_case_ :Dict = self.find_next_state( _lowerCamelCase , self.adlist[child]["value"] ) if self.adlist[child]["fail_state"] is None: snake_case_ :Union[str, Any] = 0 snake_case_ :Tuple = ( self.adlist[child]["output"] + self.adlist[self.adlist[child]["fail_state"]]["output"] ) def _a ( self , a ): """simple docstring""" snake_case_ :List[str] = {} # returns a dict with keywords and list of its occurrences snake_case_ 
:Union[str, Any] = 0 for i in range(len(_lowerCamelCase ) ): while ( self.find_next_state(_lowerCamelCase , string[i] ) is None and current_state != 0 ): snake_case_ :str = self.adlist[current_state]["fail_state"] snake_case_ :int = self.find_next_state(_lowerCamelCase , string[i] ) if next_state is None: snake_case_ :str = 0 else: snake_case_ :List[Any] = next_state for key in self.adlist[current_state]["output"]: if key not in result: snake_case_ :Optional[Any] = [] result[key].append(i - len(_lowerCamelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
584
import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __magic_name__ ( __UpperCAmelCase): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = "Speech2TextFeatureExtractor" SCREAMING_SNAKE_CASE__ : List[str] = "Speech2TextTokenizer" def __init__( self: List[str] , _lowerCamelCase: str , _lowerCamelCase: Optional[Any] ): super().__init__(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = self.feature_extractor SCREAMING_SNAKE_CASE_ = False def __call__( self: List[str] , *_lowerCamelCase: Dict , **_lowerCamelCase: List[str] ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_lowerCamelCase , **_lowerCamelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' ) else: SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _lowerCamelCase ) SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _lowerCamelCase ) if len(_lowerCamelCase ) > 0: SCREAMING_SNAKE_CASE_ = args[0] SCREAMING_SNAKE_CASE_ = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: SCREAMING_SNAKE_CASE_ = self.feature_extractor(_lowerCamelCase , *_lowerCamelCase , sampling_rate=_lowerCamelCase , **_lowerCamelCase ) if text is not None: SCREAMING_SNAKE_CASE_ = self.tokenizer(_lowerCamelCase , **_lowerCamelCase ) if text is None: return inputs elif audio is None: return encodings else: SCREAMING_SNAKE_CASE_ = encodings['''input_ids'''] return inputs def _A ( self: List[str] , *_lowerCamelCase: List[Any] , **_lowerCamelCase: Union[str, Any] ): return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase ) def _A ( self: Union[str, Any] , *_lowerCamelCase: str , **_lowerCamelCase: Optional[Any] 
): return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase ) @contextmanager def _A ( self: List[Any] ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = self.tokenizer yield SCREAMING_SNAKE_CASE_ = self.feature_extractor SCREAMING_SNAKE_CASE_ = False
234
0
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node __UpperCAmelCase = 4 __UpperCAmelCase = 3 class __lowercase ( SCREAMING_SNAKE_CASE__ ): pass def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' for shard in shards: for i in range(__UpperCamelCase ): yield {"i": i, "shard": shard} def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = int(os.environ["""RANK"""] ) UpperCAmelCase__ : Dict = int(os.environ["""WORLD_SIZE"""] ) UpperCAmelCase__ : Optional[int] = ArgumentParser() parser.add_argument("""--streaming""" , type=__UpperCamelCase ) parser.add_argument("""--local_rank""" , type=__UpperCamelCase ) parser.add_argument("""--num_workers""" , type=__UpperCamelCase , default=0 ) UpperCAmelCase__ : List[str] = parser.parse_args() UpperCAmelCase__ : List[str] = args.streaming UpperCAmelCase__ : Dict = args.num_workers UpperCAmelCase__ : Dict = {"shards": [F"shard_{shard_idx}" for shard_idx in range(__UpperCamelCase )]} UpperCAmelCase__ : str = IterableDataset.from_generator(__UpperCamelCase , gen_kwargs=__UpperCamelCase ) if not streaming: UpperCAmelCase__ : List[str] = Dataset.from_list(list(__UpperCamelCase ) ) UpperCAmelCase__ : Optional[int] = split_dataset_by_node(__UpperCamelCase , rank=__UpperCamelCase , world_size=__UpperCamelCase ) UpperCAmelCase__ : Any = torch.utils.data.DataLoader(__UpperCamelCase , num_workers=__UpperCamelCase ) UpperCAmelCase__ : Union[str, Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD UpperCAmelCase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) UpperCAmelCase__ : List[Any] = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" ) if __name__ == "__main__": main()
719
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... 
).images\n\n >>> image[0].save("cat.png")\n ```\n' def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=8 ): '''simple docstring''' UpperCAmelCase__ : Tuple = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 UpperCAmelCase__ : Any = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class __lowercase ( __lowerCamelCase ): def __init__( self : str ,A : MultilingualCLIP ,A : XLMRobertaTokenizer ,A : UNetaDConditionModel ,A : Union[DDIMScheduler, DDPMScheduler] ,A : VQModel ,): '''simple docstring''' super().__init__() self.register_modules( text_encoder=A ,tokenizer=A ,unet=A ,scheduler=A ,movq=A ,) UpperCAmelCase__ : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowercase ( self : Dict ,A : Any ,A : Tuple ,A : Dict ,A : int ,A : str ,A : List[str] ): '''simple docstring''' if latents is None: UpperCAmelCase__ : Any = randn_tensor(A ,generator=A ,device=A ,dtype=A ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) UpperCAmelCase__ : int = latents.to(A ) UpperCAmelCase__ : Any = latents * scheduler.init_noise_sigma return latents def __lowercase ( self : Optional[int] ,A : List[Any] ,A : Optional[Any] ,A : str ,A : Optional[Any] ,A : str=None ,): '''simple docstring''' UpperCAmelCase__ : List[Any] = len(A ) if isinstance(A ,A ) else 1 # get prompt text embeddings UpperCAmelCase__ : List[Any] = self.tokenizer( A ,padding="""max_length""" ,truncation=A ,max_length=77 ,return_attention_mask=A ,add_special_tokens=A ,return_tensors="""pt""" ,) UpperCAmelCase__ : List[str] = text_inputs.input_ids UpperCAmelCase__ : Any = self.tokenizer(A ,padding="""longest""" ,return_tensors="""pt""" ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(A ,A ): UpperCAmelCase__ : List[str] = self.tokenizer.batch_decode(untruncated_ids[:, 
self.tokenizer.model_max_length - 1 : -1] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCAmelCase__ : str = text_input_ids.to(A ) UpperCAmelCase__ : Optional[Any] = text_inputs.attention_mask.to(A ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.text_encoder( input_ids=A ,attention_mask=A ) UpperCAmelCase__ : Optional[int] = prompt_embeds.repeat_interleave(A ,dim=0 ) UpperCAmelCase__ : Optional[int] = text_encoder_hidden_states.repeat_interleave(A ,dim=0 ) UpperCAmelCase__ : List[str] = text_mask.repeat_interleave(A ,dim=0 ) if do_classifier_free_guidance: UpperCAmelCase__ : List[str] if negative_prompt is None: UpperCAmelCase__ : List[Any] = [""""""] * batch_size elif type(A ) is not type(A ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=" f" {type(A )}." ) elif isinstance(A ,A ): UpperCAmelCase__ : Any = [negative_prompt] elif batch_size != len(A ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:" f" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches" """ the batch size of `prompt`.""" ) else: UpperCAmelCase__ : List[Any] = negative_prompt UpperCAmelCase__ : Any = self.tokenizer( A ,padding="""max_length""" ,max_length=77 ,truncation=A ,return_attention_mask=A ,add_special_tokens=A ,return_tensors="""pt""" ,) UpperCAmelCase__ : Optional[int] = uncond_input.input_ids.to(A ) UpperCAmelCase__ : str = uncond_input.attention_mask.to(A ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.text_encoder( input_ids=A ,attention_mask=A ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase__ : Any = negative_prompt_embeds.shape[1] UpperCAmelCase__ : Any = negative_prompt_embeds.repeat(1 ,A ) UpperCAmelCase__ : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,A ) UpperCAmelCase__ : Dict = uncond_text_encoder_hidden_states.shape[1] UpperCAmelCase__ : Any = uncond_text_encoder_hidden_states.repeat(1 ,A ,1 ) UpperCAmelCase__ : Any = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt ,A ,-1 ) UpperCAmelCase__ : List[Any] = uncond_text_mask.repeat_interleave(A ,dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase__ : Any = torch.cat([negative_prompt_embeds, prompt_embeds] ) UpperCAmelCase__ : int = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) UpperCAmelCase__ : str = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowercase ( self : Tuple ,A : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) UpperCAmelCase__ : str = torch.device(f"cuda:{gpu_id}" ) UpperCAmelCase__ : Tuple = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A ,A ) def __lowercase ( self : int ,A : Optional[Any]=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) UpperCAmelCase__ : List[str] = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to("""cpu""" ,silence_dtype_warnings=A ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase__ : List[str] = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cpu_offload_with_hook(A ,A ,prev_module_hook=A ) if self.safety_checker is not None: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = cpu_offload_with_hook(self.safety_checker ,A ,prev_module_hook=A ) # We'll offload the last model manually. 
UpperCAmelCase__ : Any = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowercase ( self : List[Any] ): '''simple docstring''' if not hasattr(self.unet ,"""_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(A ,"""_hf_hook""" ) and hasattr(module._hf_hook ,"""execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A ) def __call__( self : Union[str, Any] ,A : Union[str, List[str]] ,A : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A : Optional[Union[str, List[str]]] = None ,A : int = 512 ,A : int = 512 ,A : int = 100 ,A : float = 4.0 ,A : int = 1 ,A : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A : Optional[torch.FloatTensor] = None ,A : Optional[str] = "pil" ,A : bool = True ,): '''simple docstring''' if isinstance(A ,A ): UpperCAmelCase__ : str = 1 elif isinstance(A ,A ): UpperCAmelCase__ : Tuple = len(A ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(A )}" ) UpperCAmelCase__ : Optional[int] = self._execution_device UpperCAmelCase__ : Dict = batch_size * num_images_per_prompt UpperCAmelCase__ : Union[str, Any] = guidance_scale > 1.0 UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self._encode_prompt( A ,A ,A ,A ,A ) if isinstance(A ,A ): UpperCAmelCase__ : Dict = torch.cat(A ,dim=0 ) if isinstance(A ,A ): UpperCAmelCase__ : Dict = torch.cat(A ,dim=0 ) if do_classifier_free_guidance: UpperCAmelCase__ : Optional[int] = image_embeds.repeat_interleave(A ,dim=0 ) UpperCAmelCase__ : Tuple = negative_image_embeds.repeat_interleave(A ,dim=0 ) UpperCAmelCase__ : List[str] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to( dtype=prompt_embeds.dtype ,device=A ) 
self.scheduler.set_timesteps(A ,device=A ) UpperCAmelCase__ : int = self.scheduler.timesteps UpperCAmelCase__ : Union[str, Any] = self.unet.config.in_channels UpperCAmelCase__ , UpperCAmelCase__ : Dict = get_new_h_w(A ,A ,self.movq_scale_factor ) # create initial latent UpperCAmelCase__ : Dict = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,A ,A ,A ,self.scheduler ,) for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase__ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase__ : Optional[Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds} UpperCAmelCase__ : Any = self.unet( sample=A ,timestep=A ,encoder_hidden_states=A ,added_cond_kwargs=A ,return_dict=A ,)[0] if do_classifier_free_guidance: UpperCAmelCase__ , UpperCAmelCase__ : List[str] = noise_pred.split(latents.shape[1] ,dim=1 ) UpperCAmelCase__ , UpperCAmelCase__ : Any = noise_pred.chunk(2 ) UpperCAmelCase__ , UpperCAmelCase__ : str = variance_pred.chunk(2 ) UpperCAmelCase__ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase__ : List[Any] = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,"""variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase__ : Optional[Any] = self.scheduler.step( A ,A ,A ,generator=A ,).prev_sample # post-processing UpperCAmelCase__ : List[Any] = self.movq.decode(A ,force_not_quantize=A )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: UpperCAmelCase__ : 
Union[str, Any] = image * 0.5 + 0.5 UpperCAmelCase__ : Optional[Any] = image.clamp(0 ,1 ) UpperCAmelCase__ : Dict = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": UpperCAmelCase__ : List[str] = self.numpy_to_pil(A ) if not return_dict: return (image,) return ImagePipelineOutput(images=A )
194
0
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, logging if is_torch_available(): import torch UpperCAmelCase : List[str] = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : List[Any] , UpperCamelCase : bool = True , UpperCamelCase : Optional[Dict[str, int]] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : List[Any] = size if size is not None else {"""shortest_edge""": 256} __UpperCAmelCase : Union[str, Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : Union[str, Any] = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : str = resample __UpperCAmelCase : List[str] = do_center_crop __UpperCAmelCase : List[Any] = crop_size __UpperCAmelCase : List[Any] = do_rescale __UpperCAmelCase : int = 
rescale_factor __UpperCAmelCase : List[str] = do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : Tuple = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : str = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : np.ndarray , UpperCamelCase : float , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[str] ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[float] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Union[str, Any] = size if size is not None else self.size __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else 
self.do_center_crop __UpperCAmelCase : List[str] = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std __UpperCAmelCase : Tuple = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
__UpperCAmelCase : List[str] = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Union[str, Any] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Union[str, Any] = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : List[str] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : int = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : int = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Dict , UpperCamelCase : List[Tuple] = None ): '''simple docstring''' __UpperCAmelCase : Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCamelCase ): __UpperCAmelCase : Dict = target_sizes.numpy() __UpperCAmelCase : List[Any] = [] for idx in range(len(UpperCamelCase ) ): __UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=UpperCamelCase ) __UpperCAmelCase : int = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCamelCase ) else: __UpperCAmelCase : str = logits.argmax(dim=1 ) __UpperCAmelCase : Tuple = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
139
"""simple docstring""" import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, "sqlalchemy.sql.Selectable"] , UpperCamelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase : Optional[Features] = None , UpperCamelCase : str = None , UpperCamelCase : bool = False , **UpperCamelCase : List[Any] , ): '''simple docstring''' super().__init__(features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Dict = Sql( cache_dir=UpperCamelCase , features=UpperCamelCase , sql=UpperCamelCase , con=UpperCamelCase , **UpperCamelCase , ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Tuple = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : str = None __UpperCAmelCase : Dict = None self.builder.download_and_prepare( download_config=UpperCamelCase , download_mode=UpperCamelCase , verification_mode=UpperCamelCase , base_path=UpperCamelCase , ) # Build dataset for splits __UpperCAmelCase : Optional[int] = self.builder.as_dataset( split="""train""" , verification_mode=UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : Dataset , UpperCamelCase : str , UpperCamelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' if num_proc is not None and num_proc <= 0: raise 
ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) __UpperCAmelCase : Tuple = dataset __UpperCAmelCase : int = name __UpperCAmelCase : Union[str, Any] = con __UpperCAmelCase : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __UpperCAmelCase : Optional[int] = num_proc __UpperCAmelCase : Any = to_sql_kwargs def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = self.to_sql_kwargs.pop("""sql""" , UpperCamelCase ) __UpperCAmelCase : Dict = self.to_sql_kwargs.pop("""con""" , UpperCamelCase ) __UpperCAmelCase : Any = self.to_sql_kwargs.pop("""index""" , UpperCamelCase ) __UpperCAmelCase : Dict = self._write(index=UpperCamelCase , **self.to_sql_kwargs ) return written def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Any = args __UpperCAmelCase : Optional[int] = {**to_sql_kwargs, """if_exists""": """append"""} if offset > 0 else to_sql_kwargs __UpperCAmelCase : Optional[int] = query_table( table=self.dataset.data , key=slice(UpperCamelCase , offset + self.batch_size ) , indices=self.dataset._indices , ) __UpperCAmelCase : Optional[int] = batch.to_pandas() __UpperCAmelCase : Union[str, Any] = df.to_sql(self.name , self.con , index=UpperCamelCase , **UpperCamelCase ) return num_rows or len(UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] , **UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : List[Any] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __UpperCAmelCase ,__UpperCAmelCase : Tuple = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for 
num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , UpperCamelCase , UpperCamelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating SQL from Arrow format""" , ): written += num_rows return written
139
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { "configuration_upernet": ["UperNetConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ "UperNetForSemanticSegmentation", "UperNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
718
"""simple docstring""" def _snake_case ( lowercase__ : int ) -> int: '''simple docstring''' lowerCAmelCase_ :List[str] = 0 while num > 0: digit_sum += num % 1_0 num //= 1_0 return digit_sum def _snake_case ( lowercase__ : int = 1_0_0 ) -> int: '''simple docstring''' lowerCAmelCase_ :Optional[Any] = 1 lowerCAmelCase_ :int = 2 for i in range(2 , max_n + 1 ): lowerCAmelCase_ :Optional[Any] = pre_numerator lowerCAmelCase_ :int = 2 * i // 3 if i % 3 == 0 else 1 lowerCAmelCase_ :str = cur_numerator lowerCAmelCase_ :List[Any] = e_cont * pre_numerator + temp return sum_digits(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
256
0
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : Dict , a_ : Optional[int] )-> Dict: """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): SCREAMING_SNAKE_CASE__ : Optional[int] = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(a_ ) def __lowercase( self : Optional[int] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 'sgugger/tiny-distilbert-classification' SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : List[str] = 
PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , torchscript=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , fpaa=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : Tuple = AutoConfig.from_pretrained(a_ ) # set architectures equal to `None` SCREAMING_SNAKE_CASE__ : Any = None SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ , configs=[config] ) SCREAMING_SNAKE_CASE__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : int )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : str = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) 
SCREAMING_SNAKE_CASE__ : Optional[int] = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' ) def __lowercase( self : Any )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : int = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=a_ , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : List[Any] = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase( self : str )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : List[str] = AutoConfig.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ , configs=[config] ) SCREAMING_SNAKE_CASE__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : List[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = 'sshleifer/tinier_bart' SCREAMING_SNAKE_CASE__ : Tuple = AutoConfig.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = PyTorchBenchmark(a_ , configs=[config] ) SCREAMING_SNAKE_CASE__ : Optional[Any] = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def __lowercase( self : Dict )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'sshleifer/tiny-gpt2' SCREAMING_SNAKE_CASE__ : Optional[int] = AutoConfig.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmark(a_ , configs=[config] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase( self : Union[str, Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'sshleifer/tinier_bart' SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoConfig.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : Tuple = PyTorchBenchmark(a_ , configs=[config] ) SCREAMING_SNAKE_CASE__ : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Any = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(a_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(a_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(a_ , 'train_time.csv' ) , 
env_info_csv_file=os.path.join(a_ , 'env.csv' ) , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : str = PyTorchBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , 'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , 'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , 'env.csv' ) ).exists() ) def __lowercase( self : List[str] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(a_ : Tuple ): self.assertTrue(hasattr(a_ , 'sequential' ) ) self.assertTrue(hasattr(a_ , 'cumulative' ) ) self.assertTrue(hasattr(a_ , 'current' ) ) self.assertTrue(hasattr(a_ , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE__ : Optional[Any] = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , 'log.txt' ) , log_print=a_ , trace_memory_line_by_line=a_ , multi_process=a_ , ) SCREAMING_SNAKE_CASE__ : int = PyTorchBenchmark(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(a_ , 'log.txt' ) ).exists() )
85
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class a__ : _A = PegasusConfig _A = {} _A = "gelu" def __init__( self : int , A_ : int , A_ : List[str]=13 , A_ : Optional[Any]=7 , A_ : Optional[Any]=True , A_ : Optional[int]=False , A_ : int=99 , A_ : List[Any]=32 , A_ : Optional[int]=2 , A_ : Tuple=4 , A_ : Optional[Any]=37 , A_ : List[Any]=0.1 , A_ : Any=0.1 , A_ : Optional[Any]=40 , A_ : str=2 , A_ : Optional[Any]=1 , A_ : Tuple=0 , ) -> Optional[int]: """simple docstring""" lowerCamelCase_: str = parent lowerCamelCase_: int = batch_size lowerCamelCase_: Any = seq_length lowerCamelCase_: Optional[int] = is_training lowerCamelCase_: Union[str, Any] = use_labels lowerCamelCase_: List[Any] = vocab_size lowerCamelCase_: Dict = hidden_size lowerCamelCase_: str = num_hidden_layers lowerCamelCase_: List[str] = num_attention_heads lowerCamelCase_: List[Any] = intermediate_size lowerCamelCase_: List[Any] = hidden_dropout_prob lowerCamelCase_: Any = attention_probs_dropout_prob lowerCamelCase_: int = max_position_embeddings lowerCamelCase_: int = eos_token_id lowerCamelCase_: Optional[int] = pad_token_id lowerCamelCase_: str = bos_token_id def lowerCAmelCase ( self : List[Any] ) -> Any: """simple docstring""" lowerCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase_: Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) 
lowerCamelCase_: str = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_: List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase_: List[Any] = prepare_pegasus_inputs_dict(A_ , A_ , A_ ) return config, inputs_dict def lowerCAmelCase ( self : List[str] , A_ : Optional[Any] , A_ : str ) -> List[Any]: """simple docstring""" lowerCamelCase_: Tuple = TFPegasusModel(config=A_ ).get_decoder() lowerCamelCase_: Union[str, Any] = inputs_dict["""input_ids"""] lowerCamelCase_: int = input_ids[:1, :] lowerCamelCase_: List[Any] = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase_: Union[str, Any] = inputs_dict["""head_mask"""] lowerCamelCase_: Tuple = 1 # first forward pass lowerCamelCase_: Optional[int] = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ ) lowerCamelCase_ , lowerCamelCase_: Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase_: Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_: Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase_: Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase_: Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) 
lowerCamelCase_: int = model(A_ , attention_mask=A_ )[0] lowerCamelCase_: Optional[int] = model(A_ , attention_mask=A_ , past_key_values=A_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase_: Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase_: Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase_: Optional[int] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A_ , A_ , rtol=1e-3 ) def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): if attention_mask is None: lowerCamelCase_: Optional[int] = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase_: List[str] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase_: int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase_: List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase_: List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _A = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _A = (TFPegasusForConditionalGeneration,) if 
is_tf_available() else () _A = ( { "conversational": TFPegasusForConditionalGeneration, "feature-extraction": TFPegasusModel, "summarization": TFPegasusForConditionalGeneration, "text2text-generation": TFPegasusForConditionalGeneration, "translation": TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _A = True _A = False _A = False def lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" lowerCamelCase_: Tuple = TFPegasusModelTester(self ) lowerCamelCase_: Optional[int] = ConfigTester(self , config_class=A_ ) def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase ( self : Dict ) -> Tuple: """simple docstring""" lowerCamelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A_ ) @require_sentencepiece @require_tokenizers @require_tf class a__ ( unittest.TestCase ): _A = [ " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" ", ] _A = [ "California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to" " reduce the risk of wildfires.", "N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.", ] # differs slightly from pytorch, likely due to numerical differences in linear layers _A = "google/pegasus-xsum" @cached_property def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase ( self : Any ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_: Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase ( self : str , **A_ : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_: Union[str, Any] = self.translate_src_text(**A_ ) assert self.expected_text == generated_words def lowerCAmelCase ( self : str , **A_ : int ) -> Optional[Any]: """simple docstring""" lowerCamelCase_: List[Any] = self.tokenizer(self.src_text , **A_ , padding=A_ , return_tensors="""tf""" ) lowerCamelCase_: str = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=A_ , ) lowerCamelCase_: Tuple = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ ) return generated_words @slow def lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" self._assert_generated_batch_equal_expected()
423
0
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _lowercase ( _UpperCamelCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE__ : Tuple = "CLIPImageProcessor" SCREAMING_SNAKE_CASE__ : Optional[int] = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self :int , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Optional[Any]=None , **lowerCAmelCase__ :Any ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __a , ) __SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop('''feature_extractor''' ) __SCREAMING_SNAKE_CASE : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__a , __a ) def __call__( self :Any , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :List[Any]=None , **lowerCAmelCase__ :Union[str, Any] ) -> Union[str, Any]: if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''' ) if text is not None: __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(__a , return_tensors=__a , **__a ) if images is not None: __SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(__a , return_tensors=__a , **__a ) if text is not None and images is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__a ) , tensor_type=__a ) def __magic_name__( self :Tuple , *lowerCAmelCase__ :int , **lowerCAmelCase__ :Tuple ) -> int: return self.tokenizer.batch_decode(*__a , **__a ) def __magic_name__( self :List[Any] , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :int ) -> int: return self.tokenizer.decode(*__a , **__a ) @property def __magic_name__( self :str ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.model_input_names __SCREAMING_SNAKE_CASE : List[str] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __magic_name__( self :Dict ) -> str: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , ) return self.image_processor_class @property def __magic_name__( self :str ) -> List[str]: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , ) return self.image_processor
707
import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __lowerCAmelCase : Any =pd.read_csv( 'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/' 'position_salaries.csv' ) __lowerCAmelCase : Dict =dataset.iloc[:, 1:2].values __lowerCAmelCase : Any =dataset.iloc[:, 2].values __lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase : List[str] =train_test_split(X, y, test_size=0.2, random_state=0) __lowerCAmelCase : List[Any] =PolynomialFeatures(degree=4) __lowerCAmelCase : Any =poly_reg.fit_transform(X) __lowerCAmelCase : str =LinearRegression() pol_reg.fit(X_poly, y) def _UpperCamelCase ( ): plt.scatter(lowercase__ , lowercase__ , color='''red''' ) plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color='''blue''' ) plt.title('''Truth or Bluff (Linear Regression)''' ) plt.xlabel('''Position level''' ) plt.ylabel('''Salary''' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
260
0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType lowerCamelCase__ : Optional[Any] = logging.get_logger(__name__) lowerCamelCase__ : List[Any] = { "openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": "", } class lowercase__( _UpperCAmelCase ): '''simple docstring''' UpperCamelCase = """imagegpt""" UpperCamelCase = ["""past_key_values"""] UpperCamelCase = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self :Union[str, Any] , lowerCamelCase_ :int=5_12 + 1 , lowerCamelCase_ :Tuple=32 * 32 , lowerCamelCase_ :List[Any]=5_12 , lowerCamelCase_ :List[Any]=24 , lowerCamelCase_ :Optional[Any]=8 , lowerCamelCase_ :Tuple=None , lowerCamelCase_ :Tuple="quick_gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :Optional[int]=0.1 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Any=0.0_2 , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :str=True , lowerCamelCase_ :int=False , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[str]=False , **lowerCamelCase_ :Dict , ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = n_positions SCREAMING_SNAKE_CASE : Union[str, Any] = n_embd SCREAMING_SNAKE_CASE : List[Any] = n_layer SCREAMING_SNAKE_CASE : Any = n_head SCREAMING_SNAKE_CASE : Optional[int] = n_inner SCREAMING_SNAKE_CASE : Union[str, Any] = activation_function SCREAMING_SNAKE_CASE : List[str] = resid_pdrop SCREAMING_SNAKE_CASE : str = embd_pdrop SCREAMING_SNAKE_CASE : Any = attn_pdrop SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon 
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE : List[Any] = scale_attn_weights SCREAMING_SNAKE_CASE : int = use_cache SCREAMING_SNAKE_CASE : List[str] = scale_attn_by_inverse_layer_idx SCREAMING_SNAKE_CASE : str = reorder_and_upcast_attn SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings super().__init__(tie_word_embeddings=lowerCamelCase_ , **lowerCamelCase_ ) class lowercase__( _UpperCAmelCase ): '''simple docstring''' @property def __lowerCAmelCase ( self :Dict ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ] ) def __lowerCAmelCase ( self :str , lowerCamelCase_ :"FeatureExtractionMixin" , lowerCamelCase_ :int = 1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 32 , lowerCamelCase_ :int = 32 , ) -> Mapping[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = dict(preprocessor(images=lowerCamelCase_ , return_tensors=lowerCamelCase_ ) ) return inputs
698
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : Tuple = logging.get_logger(__name__) lowerCamelCase__ : List[Any] = { "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json", } class lowercase__( _UpperCAmelCase ): '''simple docstring''' UpperCamelCase = """mra""" def __init__( self :int , lowerCamelCase_ :Optional[int]=5_02_65 , lowerCamelCase_ :List[str]=7_68 , lowerCamelCase_ :List[str]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :int=30_72 , lowerCamelCase_ :Tuple="gelu" , lowerCamelCase_ :List[Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :str=5_12 , lowerCamelCase_ :List[str]=1 , lowerCamelCase_ :int=0.0_2 , lowerCamelCase_ :int=1E-5 , lowerCamelCase_ :List[Any]="absolute" , lowerCamelCase_ :str=4 , lowerCamelCase_ :List[str]="full" , lowerCamelCase_ :List[Any]=0 , lowerCamelCase_ :Optional[Any]=0 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :List[str]=0 , lowerCamelCase_ :List[Any]=2 , **lowerCamelCase_ :str , ) -> Dict: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = vocab_size SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings SCREAMING_SNAKE_CASE : List[Any] = hidden_size SCREAMING_SNAKE_CASE : Dict = num_hidden_layers SCREAMING_SNAKE_CASE : Tuple = num_attention_heads SCREAMING_SNAKE_CASE : Any = intermediate_size SCREAMING_SNAKE_CASE : Any = hidden_act SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = initializer_range SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = position_embedding_type SCREAMING_SNAKE_CASE : List[str] = block_per_row SCREAMING_SNAKE_CASE : Optional[int] = 
approx_mode SCREAMING_SNAKE_CASE : List[Any] = initial_prior_first_n_blocks SCREAMING_SNAKE_CASE : Union[str, Any] = initial_prior_diagonal_n_blocks
698
1
import re def a ( A__ ) -> str: '''simple docstring''' if len(re.findall('''[ATCG]''' , A__ ) ) != len(A__ ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
250
import mpmath # for roots of unity import numpy as np class lowercase : def __init__( self : Optional[Any] , _lowercase : List[Any]=None , _lowercase : str=None ): # Input as list SCREAMING_SNAKE_CASE__ : int = list(poly_a or [0] )[:] SCREAMING_SNAKE_CASE__ : int = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() SCREAMING_SNAKE_CASE__ : str = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() SCREAMING_SNAKE_CASE__ : Tuple = len(self.polyB ) # Add 0 to make lengths equal a power of 2 SCREAMING_SNAKE_CASE__ : Dict = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform SCREAMING_SNAKE_CASE__ : Union[str, Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product SCREAMING_SNAKE_CASE__ : Any = self.__multiply() def lowercase__ ( self : Optional[int] , _lowercase : Union[str, Any] ): SCREAMING_SNAKE_CASE__ : Dict = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB] # Corner case if len(_lowercase ) <= 1: return dft[0] # SCREAMING_SNAKE_CASE__ : int = self.c_max_length // 2 while next_ncol > 0: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[] for i in range(_lowercase )] SCREAMING_SNAKE_CASE__ : Optional[Any] = self.root**next_ncol # First half of next step SCREAMING_SNAKE_CASE__ : List[Any] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(_lowercase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step SCREAMING_SNAKE_CASE__ : List[Any] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(_lowercase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : int = new_dft 
SCREAMING_SNAKE_CASE__ : Any = next_ncol // 2 return dft[0] def lowercase__ ( self : str ): SCREAMING_SNAKE_CASE__ : List[Any] = self.__dft('''A''' ) SCREAMING_SNAKE_CASE__ : int = self.__dft('''B''' ) SCREAMING_SNAKE_CASE__ : int = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT SCREAMING_SNAKE_CASE__ : Dict = 2 while next_ncol <= self.c_max_length: SCREAMING_SNAKE_CASE__ : Tuple = [[] for i in range(_lowercase )] SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2) SCREAMING_SNAKE_CASE__ : Dict = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_inverse_c next_ncol *= 2 # Unpack SCREAMING_SNAKE_CASE__ : str = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : Dict ): SCREAMING_SNAKE_CASE__ : str = '''A = ''' + ''' + '''.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) ) SCREAMING_SNAKE_CASE__ : Tuple = '''B = ''' + ''' + '''.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) ) SCREAMING_SNAKE_CASE__ : List[Any] = '''A*B = ''' + ''' + '''.join( f"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) ) return f"""{a}\n{b}\n{c}""" # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
250
1
"""simple docstring""" from itertools import product def _snake_case ( snake_case__ : int , snake_case__ : int ): A = sides_number A = max_face_number * dice_number A = [0] * (max_total + 1) A = 1 A = range(snake_case__ , max_face_number + 1 ) for dice_numbers in product(snake_case__ , repeat=snake_case__ ): A = sum(snake_case__ ) totals_frequencies[total] += 1 return totals_frequencies def _snake_case ( ): A = total_frequency_distribution( sides_number=4 , dice_number=9 ) A = total_frequency_distribution( sides_number=6 , dice_number=6 ) A = 0 A = 9 A = 4 * 9 A = 6 for peter_total in range(snake_case__ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) A = (4**9) * (6**6) A = peter_wins_count / total_games_number A = round(snake_case__ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a :str = 637_8137.0 a :Optional[Any] = 635_6752.31_4245 a :List[Any] = 6_378_137 def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> float: SCREAMING_SNAKE_CASE__ : Dict = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) SCREAMING_SNAKE_CASE__ : Dict = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius SCREAMING_SNAKE_CASE__ : Tuple = haversine_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) / EQUATORIAL_RADIUS # Intermediate P and Q values SCREAMING_SNAKE_CASE__ : List[str] = (b_lata + b_lata) / 2 SCREAMING_SNAKE_CASE__ : Dict = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) SCREAMING_SNAKE_CASE__ : Tuple = (sin(__lowerCAmelCase ) ** 2) * (cos(__lowerCAmelCase ) ** 2) SCREAMING_SNAKE_CASE__ : str = cos(sigma / 2 ) ** 2 SCREAMING_SNAKE_CASE__ : List[str] = (sigma - sin(__lowerCAmelCase )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) SCREAMING_SNAKE_CASE__ : int = (cos(__lowerCAmelCase ) ** 2) * (sin(__lowerCAmelCase ) ** 2) SCREAMING_SNAKE_CASE__ : int = sin(sigma / 2 ) ** 2 SCREAMING_SNAKE_CASE__ : Optional[Any] = (sigma + sin(__lowerCAmelCase )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
680
0
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCAmelCase__ = open # noqa: we just need to have a builtin inside this module to test it properly
712
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""", # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowercase ( _lowercase ): """simple docstring""" a__ = "vit_mae" def __init__( self , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_24 , __snake_case=16 , __snake_case=3 , __snake_case=True , __snake_case=16 , __snake_case=5_12 , __snake_case=8 , __snake_case=20_48 , __snake_case=0.7_5 , __snake_case=False , **__snake_case , ): super().__init__(**__snake_case) _UpperCamelCase : Optional[int] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Tuple = num_attention_heads _UpperCamelCase : List[str] = intermediate_size _UpperCamelCase : str = hidden_act _UpperCamelCase : List[str] = hidden_dropout_prob _UpperCamelCase : List[Any] = attention_probs_dropout_prob _UpperCamelCase : str = initializer_range _UpperCamelCase : Any = layer_norm_eps _UpperCamelCase : int = image_size _UpperCamelCase : Any = patch_size _UpperCamelCase : List[Any] = num_channels _UpperCamelCase : Union[str, Any] = qkv_bias _UpperCamelCase : str = decoder_num_attention_heads _UpperCamelCase : Union[str, Any] = decoder_hidden_size _UpperCamelCase : Union[str, Any] = decoder_num_hidden_layers _UpperCamelCase : Any = decoder_intermediate_size _UpperCamelCase : int = mask_ratio _UpperCamelCase : List[Any] = norm_pix_loss
648
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __A : def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=3 , a__=4 , a__=None , ): _lowerCAmelCase : int = parent _lowerCAmelCase : str = batch_size _lowerCAmelCase : Optional[int] = seq_length _lowerCAmelCase : List[Any] = is_training _lowerCAmelCase : Tuple = use_token_type_ids _lowerCAmelCase : Optional[Any] = use_labels _lowerCAmelCase : Tuple = vocab_size _lowerCAmelCase : Union[str, Any] = hidden_size _lowerCAmelCase : Optional[int] = num_hidden_layers _lowerCAmelCase : Optional[Any] = num_attention_heads _lowerCAmelCase : Dict = intermediate_size _lowerCAmelCase : str = hidden_act _lowerCAmelCase : Union[str, Any] = hidden_dropout_prob _lowerCAmelCase : List[str] = attention_probs_dropout_prob _lowerCAmelCase : Optional[Any] = max_position_embeddings _lowerCAmelCase : Tuple = type_vocab_size _lowerCAmelCase : int = type_sequence_label_size _lowerCAmelCase : Dict = initializer_range _lowerCAmelCase : Any = num_labels _lowerCAmelCase : Union[str, Any] = num_choices _lowerCAmelCase : Union[str, Any] = scope _lowerCAmelCase : Dict = self.vocab_size - 1 def __A ( self ): _lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase : str = None if 
self.use_token_type_ids: _lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _lowerCAmelCase : Dict = None _lowerCAmelCase : int = None _lowerCAmelCase : Dict = None if self.use_labels: _lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase : int = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) _lowerCAmelCase : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self , a__ , a__ , a__ , a__ , *a__ ): _lowerCAmelCase : int = OpenAIGPTModel(config=a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ , head_mask=a__ ) _lowerCAmelCase : Union[str, Any] = model(a__ , token_type_ids=a__ ) _lowerCAmelCase : int = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , a__ , a__ , a__ , a__ , *a__ ): _lowerCAmelCase : Dict = OpenAIGPTLMHeadModel(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , a__ , a__ , a__ , a__ , *a__ ): _lowerCAmelCase : Dict = OpenAIGPTDoubleHeadsModel(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = model(a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.loss.shape , () ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , a__ , a__ , a__ , a__ , *a__ ): _lowerCAmelCase : Dict = self.num_labels _lowerCAmelCase : Tuple = OpenAIGPTForSequenceClassification(a__ ) model.to(a__ ) model.eval() _lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase : List[str] = model(a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self ): _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : str = config_and_inputs _lowerCAmelCase : Any = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask, } return config, inputs_dict @require_torch class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _UpperCamelCase : List[Any] = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _UpperCamelCase : Tuple = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __A ( self , a__ , a__ , a__ , a__ , a__ ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. 
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __A ( self , a__ , a__ , a__=False ): _lowerCAmelCase : int = super()._prepare_for_class(a__ , a__ , return_labels=a__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": _lowerCAmelCase : Any = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a__ , ) _lowerCAmelCase : Tuple = inputs_dict["""labels"""] _lowerCAmelCase : Dict = inputs_dict["""labels"""] _lowerCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a__ , ) _lowerCAmelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a__ ) return inputs_dict def __A ( self ): _lowerCAmelCase : List[Any] = OpenAIGPTModelTester(self ) _lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=a__ , n_embd=37 ) def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*a__ ) def __A ( self ): _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*a__ ) def __A ( self ): _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*a__ ) def __A ( self ): _lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a__ ) @slow def __A ( self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Tuple = OpenAIGPTModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @require_torch class __A ( unittest.TestCase ): @slow def __A ( self ): _lowerCAmelCase : Any 
= OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" ) model.to(a__ ) _lowerCAmelCase : int = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=a__ ) # the president is _lowerCAmelCase : List[Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the _lowerCAmelCase : Optional[Any] = model.generate(a__ , do_sample=a__ ) self.assertListEqual(output_ids[0].tolist() , a__ )
213
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Dict = (UniPCMultistepScheduler,) _UpperCamelCase : List[str] = (("num_inference_steps", 25),) def __A ( self , **a__ ): _lowerCAmelCase : List[Any] = { """num_train_timesteps""": 1000, """beta_start""": 0.0_0_0_1, """beta_end""": 0.0_2, """beta_schedule""": """linear""", """solver_order""": 2, """solver_type""": """bh2""", } config.update(**a__ ) return config def __A ( self , a__=0 , **a__ ): _lowerCAmelCase : Dict = dict(self.forward_default_kwargs ) _lowerCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" , a__ ) _lowerCAmelCase : List[str] = self.dummy_sample _lowerCAmelCase : Optional[int] = 0.1 * sample _lowerCAmelCase : Tuple = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _lowerCAmelCase : int = self.get_scheduler_config(**a__ ) _lowerCAmelCase : Dict = scheduler_class(**a__ ) scheduler.set_timesteps(a__ ) # copy over dummy past residuals _lowerCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a__ ) _lowerCAmelCase : List[str] = scheduler_class.from_pretrained(a__ ) new_scheduler.set_timesteps(a__ ) # copy over dummy past residuals _lowerCAmelCase : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = sample, sample for t in range(a__ , time_step + scheduler.config.solver_order + 1 ): _lowerCAmelCase : Optional[Any] = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample _lowerCAmelCase : int = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def 
__A ( self , a__=0 , **a__ ): _lowerCAmelCase : Optional[Any] = dict(self.forward_default_kwargs ) _lowerCAmelCase : Dict = kwargs.pop("""num_inference_steps""" , a__ ) _lowerCAmelCase : Optional[Any] = self.dummy_sample _lowerCAmelCase : List[Any] = 0.1 * sample _lowerCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _lowerCAmelCase : Union[str, Any] = self.get_scheduler_config() _lowerCAmelCase : Optional[int] = scheduler_class(**a__ ) scheduler.set_timesteps(a__ ) # copy over dummy past residuals (must be after setting timesteps) _lowerCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(a__ ) _lowerCAmelCase : Tuple = scheduler_class.from_pretrained(a__ ) # copy over dummy past residuals new_scheduler.set_timesteps(a__ ) # copy over dummy past residual (must be after setting timesteps) _lowerCAmelCase : int = dummy_past_residuals[: new_scheduler.config.solver_order] _lowerCAmelCase : int = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample _lowerCAmelCase : Union[str, Any] = new_scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __A ( self , a__=None , **a__ ): if scheduler is None: _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : Optional[int] = self.get_scheduler_config(**a__ ) _lowerCAmelCase : Optional[int] = scheduler_class(**a__ ) _lowerCAmelCase : Tuple = self.scheduler_classes[0] _lowerCAmelCase : List[Any] = self.get_scheduler_config(**a__ ) _lowerCAmelCase : Tuple = scheduler_class(**a__ ) _lowerCAmelCase : Optional[int] = 10 _lowerCAmelCase : Any = self.dummy_model() _lowerCAmelCase : int = self.dummy_sample_deter scheduler.set_timesteps(a__ ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase : Any = model(a__ , a__ ) _lowerCAmelCase : int = 
scheduler.step(a__ , a__ , a__ ).prev_sample return sample def __A ( self ): _lowerCAmelCase : Union[str, Any] = dict(self.forward_default_kwargs ) _lowerCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" , a__ ) for scheduler_class in self.scheduler_classes: _lowerCAmelCase : List[Any] = self.get_scheduler_config() _lowerCAmelCase : Optional[int] = scheduler_class(**a__ ) _lowerCAmelCase : Optional[int] = self.dummy_sample _lowerCAmelCase : str = 0.1 * sample if num_inference_steps is not None and hasattr(a__ , """set_timesteps""" ): scheduler.set_timesteps(a__ ) elif num_inference_steps is not None and not hasattr(a__ , """set_timesteps""" ): _lowerCAmelCase : str = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _lowerCAmelCase : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] _lowerCAmelCase : List[str] = dummy_past_residuals[: scheduler.config.solver_order] _lowerCAmelCase : List[str] = scheduler.timesteps[5] _lowerCAmelCase : Optional[Any] = scheduler.timesteps[6] _lowerCAmelCase : Optional[Any] = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample _lowerCAmelCase : int = scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __A ( self ): # make sure that iterating over schedulers with same config names gives same results # for defaults _lowerCAmelCase : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) _lowerCAmelCase : List[Any] = self.full_loop(scheduler=a__ ) _lowerCAmelCase : Optional[Any] = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 _lowerCAmelCase : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _lowerCAmelCase : Union[str, Any] = DEISMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase : 
List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) _lowerCAmelCase : Union[str, Any] = self.full_loop(scheduler=a__ ) _lowerCAmelCase : Tuple = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def __A ( self ): for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=a__ ) def __A ( self ): self.check_over_configs(thresholding=a__ ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=a__ , prediction_type=a__ , sample_max_value=a__ , solver_order=a__ , solver_type=a__ , ) def __A ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a__ ) def __A ( self ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=a__ , solver_type=a__ , prediction_type=a__ , ) _lowerCAmelCase : Tuple = self.full_loop( solver_order=a__ , solver_type=a__ , prediction_type=a__ , ) assert not torch.isnan(a__ ).any(), "Samples have nan numbers" def __A ( self ): self.check_over_configs(lower_order_final=a__ ) self.check_over_configs(lower_order_final=a__ ) def __A ( self ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=a__ , time_step=0 ) def __A ( self ): _lowerCAmelCase : List[str] = self.full_loop() _lowerCAmelCase : Dict = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3 def __A ( self ): _lowerCAmelCase : Dict = self.full_loop(prediction_type="""v_prediction""" ) _lowerCAmelCase : str = torch.mean(torch.abs(a__ ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3 def __A ( self ): _lowerCAmelCase : Optional[int] = self.scheduler_classes[0] _lowerCAmelCase : List[str] = self.get_scheduler_config(thresholding=a__ , dynamic_thresholding_ratio=0 ) _lowerCAmelCase 
: int = scheduler_class(**a__ ) _lowerCAmelCase : List[Any] = 10 _lowerCAmelCase : str = self.dummy_model() _lowerCAmelCase : str = self.dummy_sample_deter.half() scheduler.set_timesteps(a__ ) for i, t in enumerate(scheduler.timesteps ): _lowerCAmelCase : Optional[Any] = model(a__ , a__ ) _lowerCAmelCase : Tuple = scheduler.step(a__ , a__ , a__ ).prev_sample assert sample.dtype == torch.floataa def __A ( self , **a__ ): for scheduler_class in self.scheduler_classes: _lowerCAmelCase : Any = self.get_scheduler_config(**a__ ) _lowerCAmelCase : Optional[Any] = scheduler_class(**a__ ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
213
1
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from .config import config_command_parser from .config_args import default_config_file, load_config_from_file # noqa: F401 from .default import default_command_parser from .update import update_command_parser def __lowercase (_SCREAMING_SNAKE_CASE :List[Any]=None ): SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser(add_help=_SCREAMING_SNAKE_CASE , allow_abbrev=_SCREAMING_SNAKE_CASE ) # The main config parser SCREAMING_SNAKE_CASE : Dict = config_command_parser(_SCREAMING_SNAKE_CASE ) # The subparser to add commands to SCREAMING_SNAKE_CASE : Any = config_parser.add_subparsers(title='''subcommands''' , dest='''subcommand''' ) # Then add other parsers with the parent parser default_command_parser(_SCREAMING_SNAKE_CASE , parents=[parent_parser] ) update_command_parser(_SCREAMING_SNAKE_CASE , parents=[parent_parser] ) return config_parser def __lowercase (): SCREAMING_SNAKE_CASE : List[Any] = get_config_parser() SCREAMING_SNAKE_CASE : int = config_parser.parse_args() if not hasattr(_SCREAMING_SNAKE_CASE , '''func''' ): config_parser.print_help() exit(1 ) # Run args.func(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
355
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def __lowercase (_SCREAMING_SNAKE_CASE :List[Any] ): return x + 2 class a__ ( unittest.TestCase ): def lowercase__ (self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = '''x = 3''' SCREAMING_SNAKE_CASE : Dict = {} SCREAMING_SNAKE_CASE : Any = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) assert result == 3 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3} ) SCREAMING_SNAKE_CASE : str = '''x = y''' SCREAMING_SNAKE_CASE : int = {'''y''': 5} SCREAMING_SNAKE_CASE : Optional[int] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 5, '''y''': 5} ) def lowercase__ (self : Optional[int] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : int = '''y = add_two(x)''' SCREAMING_SNAKE_CASE : Optional[Any] = {'''x''': 3} SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase ) assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} ) # Won't work without the tool with CaptureStdout() as out: SCREAMING_SNAKE_CASE : Tuple = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) assert result is None assert "tried to execute add_two" in out.out def lowercase__ (self : Any ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = '''x = 3''' SCREAMING_SNAKE_CASE : Any = {} SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) assert result == 3 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3} ) def lowercase__ (self : Optional[int] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3} SCREAMING_SNAKE_CASE : 
List[Any] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase ) self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} ) self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def lowercase__ (self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = '''x = 3\ny = 5''' SCREAMING_SNAKE_CASE : Tuple = {} SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 5} ) def lowercase__ (self : Tuple ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = '''text = f\'This is x: {x}.\'''' SCREAMING_SNAKE_CASE : List[str] = {'''x''': 3} SCREAMING_SNAKE_CASE : Dict = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''text''': '''This is x: 3.'''} ) def lowercase__ (self : int ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = '''if x <= 3:\n y = 2\nelse:\n y = 5''' SCREAMING_SNAKE_CASE : int = {'''x''': 3} SCREAMING_SNAKE_CASE : List[str] = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 2} ) SCREAMING_SNAKE_CASE : Any = {'''x''': 8} SCREAMING_SNAKE_CASE : int = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 8, '''y''': 5} ) def lowercase__ (self : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE : Any = '''test_list = [x, add_two(x)]''' SCREAMING_SNAKE_CASE : List[str] = {'''x''': 3} SCREAMING_SNAKE_CASE : Tuple = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase, [3, 5] ) self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} ) def lowercase__ (self : int ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = '''y = x''' SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3} SCREAMING_SNAKE_CASE : str = evaluate(__UpperCAmelCase, {}, state=__UpperCAmelCase ) assert result == 3 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''y''': 3} ) def lowercase__ (self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Dict = '''test_list = [x, add_two(x)]\ntest_list[1]''' SCREAMING_SNAKE_CASE : int = {'''x''': 3} SCREAMING_SNAKE_CASE : Optional[int] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase ) assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} ) SCREAMING_SNAKE_CASE : Dict = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' SCREAMING_SNAKE_CASE : Tuple = {'''x''': 3} SCREAMING_SNAKE_CASE : Optional[Any] = evaluate(__UpperCAmelCase, {'''add_two''': add_two}, state=__UpperCAmelCase ) assert result == 5 self.assertDictEqual(__UpperCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} ) def lowercase__ (self : Optional[Any] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE : Any = '''x = 0\nfor i in range(3):\n x = i''' SCREAMING_SNAKE_CASE : List[Any] = {} SCREAMING_SNAKE_CASE : List[str] = evaluate(__UpperCAmelCase, {'''range''': range}, state=__UpperCAmelCase ) assert result == 2 self.assertDictEqual(__UpperCAmelCase, {'''x''': 2, '''i''': 2} )
355
1
"""Convert original EfficientFormer checkpoints to the Hugging Face format.

Fixes vs. the previous revision: every function had been given the same
placeholder name, so the internal call to ``convert_torch_checkpoint`` and the
``__main__`` call to ``convert_efficientformer_checkpoint`` raised ``NameError``;
the config attribute is ``num_meta3d_blocks`` (``num_metaad_blocks`` does not
exist on ``EfficientFormerConfig``).
"""

import argparse
import re
from pathlib import Path

import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor

from transformers import (
    EfficientFormerConfig,
    EfficientFormerForImageClassificationWithTeacher,
    EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def rename_key(old_name, num_meta4D_last_stage):
    """Map one key of the original state dict onto the HF naming scheme.

    Args:
        old_name: key from the original checkpoint's state dict.
        num_meta4D_last_stage: number of meta4D blocks in the last stage; keys
            past this index belong to the meta3D blocks.

    Returns:
        The corresponding key in the Hugging Face model.
    """
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")
        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        # stage indices may be one or two digits; match the wider form first
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                # meta3D blocks use layernorm / linear layers instead of batchnorm / conv
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name


def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    """Rename every key of `checkpoint` in place and return it."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


def prepare_img():
    """Download the standard COCO test image used for conversion sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    """Convert a checkpoint, verify its logits against known values, and save it.

    Args:
        checkpoint_path: path to the original EfficientFormer ``.pth`` file.
        efficientformer_config_file: JSON file with the matching model config.
        pytorch_dump_path: output directory for the converted model/processor.
        push_to_hub: whether to upload the converted artifacts to the Hub.

    Raises:
        ValueError: if the checkpoint name does not identify an l1/l3/l7 model.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # NOTE(review): previous revision read `config.num_metaad_blocks`, which is
    # not an EfficientFormerConfig attribute; the real name is num_meta3d_blocks.
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline — must produce identical inputs to the processor
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfuly saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    parser.set_defaults(push_to_hub=True)

    args = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
273
"""Tests for the Perceiver tokenizer.

Fixes vs. the previous revision: the class inherited from an undefined name
(`_lowercase`) instead of the imported ``TokenizerTesterMixin``, and every
method shared the placeholder name ``lowercase__`` — so only the last method
survived class creation and references such as ``self.perceiver_tokenizer``
and ``self.get_tokenizer()`` raised ``AttributeError``.
"""

import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        """Pretrained tokenizer used by the integration-style tests below."""
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair from tokens that decode to plain ASCII letters/spaces."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check: the default model_max_length must differ from the value set later
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_invalid_byte_id(self):
        # id 178 is an invalid UTF-8 continuation byte; it must decode to the replacement char
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer has no vocabulary files, so the common tests below do not apply
    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
273
1
"""Tests for the ONNX Stable Diffusion image-to-image pipeline.

Fixes vs. the previous revision: the fast-test class inherited from an
undefined name (`lowerCAmelCase__`) instead of the imported
``OnnxPipelineTesterMixin``; all test methods shared the placeholder name
``snake_case_`` so only the last survived; the scheduler instances were built
but never assigned to ``pipe.scheduler``; and the imported pipeline class is
``OnnxStableDiffusionImg2ImgPipeline`` (the mangled ``...ImgaImg...`` name
does not exist in diffusers).
"""

import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic inputs for the tiny test checkpoint."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): restored from upstream — the previous revision created
        # the options object but dropped the attribute assignment.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
222
"""Speech2Text2 model configuration.

Fixes vs. the previous revision: the class inherited from an undefined name
(`lowerCAmelCase__`) instead of the imported ``PretrainedConfig``, and every
``__init__`` parameter had the same placeholder name, which made keyword
construction (required by ``PretrainedConfig.from_dict``) impossible.
"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """Configuration for the decoder-only Speech2Text2 model.

    Args:
        vocab_size: size of the decoder vocabulary.
        decoder_layers: number of decoder layers (also exposed as
            ``num_hidden_layers``).
        decoder_ffn_dim: dimension of the feed-forward layers.
        decoder_attention_heads: number of attention heads per layer.
        decoder_layerdrop: probability of dropping a whole decoder layer.
        use_cache: whether to return past key/values for fast decoding.
        activation_function: nonlinearity used in the feed-forward layers.
        d_model: hidden size of the decoder.
        dropout / attention_dropout / activation_dropout: dropout rates.
        init_std: std of the truncated-normal weight initializer.
        decoder_start_token_id: token that starts decoder generation.
        scale_embedding: scale embeddings by sqrt(d_model) if True.
        pad_token_id / bos_token_id / eos_token_id: special token ids.
        max_target_positions: maximum target sequence length.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
222
1