Dataset schema (column: type, observed range):

code: string, lengths 86 to 54.5k
code_codestyle: int64, 0 to 371
style_context: string, lengths 87 to 49.2k
style_context_codestyle: int64, 0 to 349
label: int64, 0 to 1
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the
    # main init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
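For orientation, a minimal usage sketch of the auto class above (not part of the record; the checkpoint name is only illustrative and loading it requires network access):

    from transformers import AutoFeatureExtractor

    # Resolves the checkpoint's model_type ("wav2vec2") to Wav2Vec2FeatureExtractor
    # via FEATURE_EXTRACTOR_MAPPING and returns a configured instance.
    extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")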
code_codestyle: 340
import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip _SCREAMING_SNAKE_CASE : str = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def UpperCAmelCase_ ( _A ): '''simple docstring''' if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' return max(metric_fn(_A , _A ) for gt in ground_truths ) def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()] SCREAMING_SNAKE_CASE__ = [] if args.gold_data_mode == "qa": SCREAMING_SNAKE_CASE__ = pd.read_csv(_A , sep='''\t''' , header=_A ) for answer_list in data[1]: SCREAMING_SNAKE_CASE__ = ast.literal_eval(_A ) answers.append(_A ) else: SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()] SCREAMING_SNAKE_CASE__ = [[reference] for reference in references] SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0 for prediction, ground_truths in zip(_A , _A ): total += 1 em += metric_max_over_ground_truths(_A , _A , _A ) fa += metric_max_over_ground_truths(_A , _A , _A ) SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total SCREAMING_SNAKE_CASE__ = 1_0_0.0 * fa / total logger.info(F'''F1: {fa:.2f}''' ) logger.info(F'''EM: {em:.2f}''' ) def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = args.k SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()] SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_A , '''r''' ).readlines()] SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = 0 for hypo, reference in zip(_A , _A ): SCREAMING_SNAKE_CASE__ = set(hypo.split('''\t''' )[:k] ) SCREAMING_SNAKE_CASE__ = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k SCREAMING_SNAKE_CASE__ = 1_0_0.0 * em / total logger.info(F'''Precision@{k}: {em: .2f}''' ) def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' def strip_title(_A ): if title.startswith('''"''' ): SCREAMING_SNAKE_CASE__ = title[1:] if title.endswith('''"''' ): SCREAMING_SNAKE_CASE__ = title[:-1] return title SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _A , return_tensors='''pt''' , padding=_A , truncation=_A , )['''input_ids'''].to(args.device ) SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_A ) SCREAMING_SNAKE_CASE__ = question_enc_outputs[0] SCREAMING_SNAKE_CASE__ = rag_model.retriever( _A , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) SCREAMING_SNAKE_CASE__ = [] for docs in all_docs: SCREAMING_SNAKE_CASE__ = [strip_title(_A ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(_A ) ) return provenance_strings def UpperCAmelCase_ ( _A , _A , _A ): '''simple docstring''' 
with torch.no_grad(): SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _A , return_tensors='''pt''' , padding=_A , truncation=_A ) SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device ) SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device ) SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate _A , attention_mask=_A , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_A , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_A , skip_special_tokens=_A ) if args.print_predictions: for q, a in zip(_A , _A ): logger.info('''Q: {} - A: {}'''.format(_A , _A ) ) return answers def UpperCAmelCase_ ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=_A , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=_A , choices=['''exact''', '''compressed''', '''legacy'''] , type=_A , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=_A , type=_A , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=_A , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=_A , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=_A , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=_A , type=_A , required=_A , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=_A , type=_A , required=_A , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=_A , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=_A , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=_A , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=_A , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=_A , help='''Min length of the generated answers''' ) 
parser.add_argument('''--max_length''' , default=50 , type=_A , help='''Max length of the generated answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) SCREAMING_SNAKE_CASE__ = parser.parse_args() SCREAMING_SNAKE_CASE__ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def UpperCAmelCase_ ( _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} if args.model_type is None: SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration SCREAMING_SNAKE_CASE__ = args.n_docs if args.index_name is not None: SCREAMING_SNAKE_CASE__ = args.index_name if args.index_path is not None: SCREAMING_SNAKE_CASE__ = args.index_path else: SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration SCREAMING_SNAKE_CASE__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , _A ) SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(_A , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(_A ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_A , **_A ) SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , retriever=_A , **_A ) model.retriever.init_retrieval() else: SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_A , **_A ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: SCREAMING_SNAKE_CASE__ = [] for line in tqdm(_A ): questions.append(line.strip() ) if len(_A ) == args.eval_batch_size: SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A ) preds_file.write('''\n'''.join(_A ) + '''\n''' ) preds_file.flush() SCREAMING_SNAKE_CASE__ = [] if len(_A ) > 0: SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_A , _A , _A ) preds_file.write('''\n'''.join(_A ) ) preds_file.flush() score_fn(_A , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : int = get_args() main(args)
style_context_codestyle: 314
label: 0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _UpperCamelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( _lowerCAmelCase ): SCREAMING_SNAKE_CASE = ["pixel_values"] def __init__( self ,A = True ,A = None ,A = PILImageResampling.BICUBIC ,A = True ,A = None ,A = True ,A = 1 / 255 ,A = True ,A = None ,A = None ,A = True ,**A ,): super().__init__(**_lowercase ) UpperCAmelCase = size if size is not None else {"""shortest_edge""": 224} UpperCAmelCase = get_size_dict(_lowercase ,default_to_square=_lowercase ) UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} UpperCAmelCase = get_size_dict(_lowercase ,default_to_square=_lowercase ,param_name="""crop_size""" ) UpperCAmelCase = do_resize UpperCAmelCase = size UpperCAmelCase = resample UpperCAmelCase = do_center_crop UpperCAmelCase = crop_size UpperCAmelCase = do_rescale UpperCAmelCase = rescale_factor UpperCAmelCase = do_normalize UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase = do_convert_rgb def _UpperCamelCase ( self ,A ,A ,A = PILImageResampling.BICUBIC ,A = None ,**A ,): UpperCAmelCase = get_size_dict(_lowercase ,default_to_square=_lowercase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCAmelCase = get_resize_output_image_size(_lowercase ,size=size["""shortest_edge"""] ,default_to_square=_lowercase ) return resize(_lowercase ,size=_lowercase ,resample=_lowercase ,data_format=_lowercase ,**_lowercase ) def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,): UpperCAmelCase = get_size_dict(_lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_lowercase ,size=(size["""height"""], size["""width"""]) ,data_format=_lowercase ,**_lowercase ) def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,): return rescale(_lowercase ,scale=_lowercase ,data_format=_lowercase ,**_lowercase ) def _UpperCamelCase ( self ,A ,A ,A ,A = None ,**A ,): return normalize(_lowercase ,mean=_lowercase ,std=_lowercase ,data_format=_lowercase ,**_lowercase ) def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,**A ,): UpperCAmelCase = do_resize if do_resize is not None else self.do_resize UpperCAmelCase = size if size is not None else self.size UpperCAmelCase = get_size_dict(_lowercase ,param_name="""size""" ,default_to_square=_lowercase ) UpperCAmelCase = resample if resample is not None else self.resample UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase = crop_size if crop_size is not None else self.crop_size UpperCAmelCase = get_size_dict(_lowercase ,param_name="""crop_size""" ,default_to_square=_lowercase ) UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase = image_mean if image_mean is not None else self.image_mean UpperCAmelCase = image_std if image_std is not None else self.image_std UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase = make_list_of_images(_lowercase ) if not valid_images(_lowercase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase = [convert_to_rgb(_lowercase ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase = [to_numpy_array(_lowercase ) for image in images] if do_resize: UpperCAmelCase = [self.resize(image=_lowercase ,size=_lowercase ,resample=_lowercase ) for image in images] if do_center_crop: UpperCAmelCase = [self.center_crop(image=_lowercase ,size=_lowercase ) for image in images] if do_rescale: UpperCAmelCase = [self.rescale(image=_lowercase ,scale=_lowercase ) for image in images] if do_normalize: UpperCAmelCase = [self.normalize(image=_lowercase ,mean=_lowercase ,std=_lowercase ) for image in images] UpperCAmelCase = [to_channel_dimension_format(_lowercase ,_lowercase ) for image in images] UpperCAmelCase = {"""pixel_values""": images} return BatchFeature(data=_lowercase ,tensor_type=_lowercase )
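A hedged usage sketch for the processor above (assumes transformers and Pillow are installed; the random image is a stand-in for real data):

    import numpy as np
    from PIL import Image
    from transformers import CLIPImageProcessor

    # hypothetical input: a random 256x256 RGB image
    image = Image.fromarray((np.random.rand(256, 256, 3) * 255).astype(np.uint8))
    processor = CLIPImageProcessor()
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the default resize/crop settings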
code_codestyle: 370
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset _UpperCamelCase = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) _UpperCamelCase = dataset.iloc[:, 1:2].values _UpperCamelCase = dataset.iloc[:, 2].values _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = train_test_split(X, y, test_size=0.2, random_state=0) _UpperCamelCase = PolynomialFeatures(degree=4) _UpperCamelCase = poly_reg.fit_transform(X) _UpperCamelCase = LinearRegression() pol_reg.fit(X_poly, y) def _a ( ): """simple docstring""" plt.scatter(_snake_case , _snake_case , color="""red""" ) plt.plot(_snake_case , pol_reg.predict(poly_reg.fit_transform(_snake_case ) ) , color="""blue""" ) plt.title("""Truth or Bluff (Linear Regression)""" ) plt.xlabel("""Position level""" ) plt.ylabel("""Salary""" ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
style_context_codestyle: 234
label: 0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 341
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
style_context_codestyle: 341
label: 1
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a string into sentences, one per line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
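A quick hedged check of the helper above (assumes nltk with the punkt model is available; note the original code discarded the re.sub result, which is fixed here):

    text = "Pegasus outputs use <n> as a newline token.<n>This helper normalizes that."
    print(add_newline_to_end_of_each_sentence(text))
    # Pegasus outputs use  as a newline token.
    # This helper normalizes that.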
code_codestyle: 351
def solution(limit: int = 28123) -> int:
    """Sum of all positive integers that cannot be written as the sum of two abundant numbers (Project Euler 23)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
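The nested loop above is a divisor-sum sieve: for each i it credits i and the complementary divisor k to every multiple i*k, so sum_divs[n] ends up as the sum of n's proper divisors. A hedged spot check (12 is the smallest abundant number, so the smallest sum of two abundant numbers is 24; 4179871 is the published Project Euler 23 answer, verify before relying on it):

    assert solution(limit=12) == sum(range(1, 13))  # == 78; nothing up to 12 is excluded
    # assert solution() == 4179871  # full run; uncomment to verify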
style_context_codestyle: 177
label: 0
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the gross price after applying a tax rate."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
code_codestyle: 305
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ = pytest.mark.integration @require_faiss class lowercase_ ( lowercase ): '''simple docstring''' def __lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" a = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(__UpperCAmelCase ) for x in np.arange(30 ).tolist()]} ) return dset def __lowerCAmelCase ( self : Tuple ) ->Any: """simple docstring""" import faiss a = self._create_dummy_dataset() a = dset.map( lambda __UpperCAmelCase , __UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase ) a = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) dset.drop_index('''vecs''' ) def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" import faiss a = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __lowerCAmelCase ( self : Any ) ->Union[str, Any]: """simple docstring""" import faiss a = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file: dset.save_faiss_index('''vecs''' , tmp_file.name ) dset.load_faiss_index('''vecs2''' , tmp_file.name ) os.unlink(tmp_file.name ) a , a = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) def __lowerCAmelCase ( self : Optional[Any] ) ->Any: """simple docstring""" a = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' ) dset.drop_index('''vecs''' ) self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) ) def __lowerCAmelCase ( self : List[Any] ) ->List[str]: """simple docstring""" from elasticsearch import Elasticsearch a = self._create_dummy_dataset() with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: a = {'''acknowledged''': True} mocked_bulk.return_value([(True, None)] * 30 ) a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}} a = Elasticsearch() dset.add_elasticsearch_index('''filename''' , es_client=__UpperCAmelCase ) a , a = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' ) self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' ) @require_faiss class lowercase_ ( lowercase ): '''simple docstring''' def __lowerCAmelCase ( self : Any ) ->Any: """simple docstring""" import faiss a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query a = np.zeros(5 , dtype=np.floataa ) a = 1 a , a = index.search(__UpperCAmelCase ) self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries a = np.eye(5 , dtype=np.floataa )[::-1] a , a = index.search_batch(__UpperCAmelCase ) self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] ) a = [scores[0] for scores in total_scores] a = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCAmelCase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase ) def __lowerCAmelCase ( self : Any ) ->List[Any]: """simple docstring""" import faiss a = FaissIndex(string_factory='''Flat''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) a = FaissIndex(string_factory='''LSH''' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__UpperCAmelCase ): a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) ) def __lowerCAmelCase ( self : int ) ->Optional[Any]: """simple docstring""" import faiss a = faiss.IndexFlat(5 ) a = FaissIndex(custom_index=__UpperCAmelCase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __lowerCAmelCase ( self : int ) 
->Dict: """simple docstring""" import faiss a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file: index.save(tmp_file.name ) a = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) a = np.zeros(5 , dtype=np.floataa ) a = 1 a , a = index.search(__UpperCAmelCase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _a ( a :Dict ) -> Any: import faiss a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) a = '''index.faiss''' a = F"""mock://{index_name}""" index.save(a , storage_options=mockfs.storage_options ) a = FaissIndex.load(a , storage_options=mockfs.storage_options ) a = np.zeros(5 , dtype=np.floataa ) a = 1 a , a = index.search(a ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class lowercase_ ( lowercase ): '''simple docstring''' def __lowerCAmelCase ( self : int ) ->List[Any]: """simple docstring""" from elasticsearch import Elasticsearch with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch( '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk: a = Elasticsearch() a = {'''acknowledged''': True} a = ElasticSearchIndex(es_client=__UpperCAmelCase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['''foo''', '''bar''', '''foobar'''] ) # single query a = '''foo''' a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} a , a = index.search(__UpperCAmelCase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout a = '''foo''' a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}} a , a = index.search(__UpperCAmelCase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries a = ['''foo''', '''bar''', '''foobar'''] a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} a , a = index.search_batch(__UpperCAmelCase ) a = [scores[0] for scores in total_scores] a = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __UpperCAmelCase ) # batched queries with timeout a = ['''foo''', '''bar''', '''foobar'''] a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}} a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 ) a = [scores[0] for scores in total_scores] a = [indices[0] for indices in total_indices] self.assertGreater(np.min(__UpperCAmelCase ) , 0 ) self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
style_context_codestyle: 0
label: 0
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler("hello", "world"))
code_codestyle: 357
def apply_table(inp, table):
    """Apply a 1-indexed permutation table to the bit-string inp."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES (uses the module-level p4_table)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (apply the round keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
style_context_codestyle: 145
label: 0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class _lowerCamelCase ( lowercase_ ): UpperCAmelCase_ = "dpr" def __init__(self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.02 , __a=1e-1_2 , __a=0 , __a="absolute" , __a = 0 , **__a , ) -> Union[str, Any]: super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase ) UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = hidden_act UpperCamelCase = intermediate_size UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = projection_dim UpperCamelCase = position_embedding_type
code_codestyle: 153
"""simple docstring""" class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Tuple ): A = name A = val def __str__( self :str ): return f"{self.__class__.__name__}({self.name}, {self.val})" def __lt__( self :List[Any] , __UpperCamelCase :Union[str, Any] ): return self.val < other.val class _UpperCAmelCase : def __init__( self :List[str] , __UpperCamelCase :Optional[Any] ): A = {} A = {} A = self.build_heap(__UpperCamelCase ) def __getitem__( self :int , __UpperCamelCase :Optional[int] ): return self.get_value(__UpperCamelCase ) def lowerCamelCase ( self :List[Any] , __UpperCamelCase :str ): return (idx - 1) // 2 def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): return idx * 2 + 1 def lowerCamelCase ( self :Union[str, Any] , __UpperCamelCase :Optional[int] ): return idx * 2 + 2 def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :str ): return self.heap_dict[key] def lowerCamelCase ( self :int , __UpperCamelCase :Optional[Any] ): A = len(__UpperCamelCase ) - 1 A = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): A = idx A = i.val for i in range(__UpperCamelCase , -1 , -1 ): self.sift_down(__UpperCamelCase , __UpperCamelCase ) return array def lowerCamelCase ( self :str , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Dict ): while True: A = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 A = self.get_right_child_idx(__UpperCamelCase ) A = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: A = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: A = r if smallest != idx: A, A = array[smallest], array[idx] ( ( A ), ( A ), ) = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) A = smallest else: break def lowerCamelCase ( self :Optional[Any] , __UpperCamelCase :Optional[int] ): A = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: A, A = self.heap[idx], self.heap[p] A, A = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) A = p A = self.get_parent_idx(__UpperCamelCase ) def lowerCamelCase ( self :Any ): return self.heap[0] def lowerCamelCase ( self :Tuple ): A, A = self.heap[-1], self.heap[0] A, A = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) A = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def lowerCamelCase ( self :Optional[int] , __UpperCamelCase :Optional[int] ): self.heap.append(__UpperCamelCase ) A = len(self.heap ) - 1 A = node.val self.sift_up(len(self.heap ) - 1 ) def lowerCamelCase ( self :Tuple ): return len(self.heap ) == 0 def lowerCamelCase ( self :Any , __UpperCamelCase :str , __UpperCamelCase :Dict ): assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" A = new_value A = new_value self.sift_up(self.idx_of_element[node] ) _snake_case : Optional[int] = Node('R', -1) _snake_case : Tuple = Node('B', 6) _snake_case : Tuple = Node('A', 3) _snake_case : Optional[int] = Node('X', 1) _snake_case : List[Any] = Node('E', 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array _snake_case : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print('Min Heap - before decrease key') for i in my_min_heap.heap: print(i) print('Min Heap - After decrease key of node [B -> 
-17]') my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 292
label: 0
import inspect
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch
import torch.utils.checkpoint

from ...models import UNet2DModel, VQModel
from ...schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502) and should be in [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
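A hedged usage sketch of the pipeline above (it matches diffusers' LDMSuperResolutionPipeline; the checkpoint is a public one, but running this requires diffusers, torch, and a download; the input file path is hypothetical):

    import torch
    from PIL import Image
    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    low_res = Image.open("input.png").convert("RGB")  # hypothetical local file
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")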
code_codestyle: 110
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowercase : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple: """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def __UpperCamelCase ( self ) -> Any: """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int: """simple docstring""" UpperCamelCase = LlamaModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ ) UpperCamelCase = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> List[Any]: """simple docstring""" UpperCamelCase = True UpperCamelCase = LlamaModel(A_ ) model.to(A_ ) model.eval() 
UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) UpperCamelCase = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> str: """simple docstring""" UpperCamelCase = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]: """simple docstring""" UpperCamelCase = True UpperCamelCase = True UpperCamelCase = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['hidden_states'][0] UpperCamelCase = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['hidden_states'][0] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1e-3 ) ) def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): __lowercase : str = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __lowercase : str = (LlamaForCausalLM,) if is_torch_available() else () __lowercase : Any = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) __lowercase : int = False __lowercase : Optional[int] = False def __UpperCamelCase ( self ) -> int: """simple docstring""" UpperCamelCase = LlamaModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> Optional[int]: """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Optional[Any]: """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> Dict: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'single_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = 3 UpperCamelCase = 'multi_label_classification' UpperCamelCase = input_dict['input_ids'] UpperCamelCase = input_ids.ne(1 ).to(A_ ) UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCamelCase = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() UpperCamelCase = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def __UpperCamelCase ( self ) -> Union[str, Any]: """simple docstring""" pass @parameterized.expand([('linear',), ('dynamic',)] ) def __UpperCamelCase ( self , A_ ) -> Optional[int]: """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ids_tensor([1, 10] , config.vocab_size ) UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() UpperCamelCase = original_model(A_ ).last_hidden_state UpperCamelCase = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCamelCase = {'type': scaling_type, 'factor': 10.0} UpperCamelCase = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() UpperCamelCase = scaled_model(A_ ).last_hidden_state UpperCamelCase = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than 
the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ , A_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ , A_ , atol=1e-5 ) ) @require_torch class lowercase ( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def __UpperCamelCase ( self ) -> List[Any]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 UpperCamelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCamelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabilities somehow, will update!
Also it is going to be a `too_slow` test' ) @slow def __UpperCamelCase ( self ) -> List[str]: """simple docstring""" UpperCamelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) UpperCamelCase = model(torch.tensor(A_ ) ) UpperCamelCase = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , A_ , atol=1e-2 , rtol=1e-2 ) # fmt: off UpperCamelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , A_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip('Model is currently gated' ) @slow def __UpperCamelCase ( self ) -> Tuple: """simple docstring""" UpperCamelCase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' UpperCamelCase = 'Simply put, the theory of relativity states that ' UpperCamelCase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) UpperCamelCase = tokenizer.encode(A_ , return_tensors='pt' ) UpperCamelCase = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=A_ ) # greedy generation outputs UpperCamelCase = model.generate(A_ , max_new_tokens=64 , top_p=A_ , temperature=1 , do_sample=A_ ) UpperCamelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=A_ ) self.assertEqual(A_ , A_ )
110
1
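# A minimal sketch of the RoPE-scaling setup the parameterized test above
# exercises; it assumes a transformers release whose LlamaConfig accepts a
# `rope_scaling` dict, and the sizes are illustrative, not from the source.
from transformers import LlamaConfig

config = LlamaConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    max_position_embeddings=512,
    rope_scaling={"type": "linear", "factor": 10.0},
)
# Linear scaling stretches every position by `factor`, so even short inputs
# change; dynamic NTK scaling only activates past max_position_embeddings,
# which is why the test expects identical short-input outputs for "dynamic".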
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase_ = logging.get_logger(__name__) class lowerCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = """maskformer-swin""" lowerCAmelCase_ : Optional[Any] = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Dict , _UpperCAmelCase : Optional[Any]=2_24 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=96 , _UpperCAmelCase : List[str]=[2, 2, 6, 2] , _UpperCAmelCase : str=[3, 6, 12, 24] , _UpperCAmelCase : str=7 , _UpperCAmelCase : int=4.0 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Dict=1E-5 , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Dict , ): """simple docstring""" super().__init__(**_UpperCAmelCase ) UpperCAmelCase__ = image_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = embed_dim UpperCAmelCase__ = depths UpperCAmelCase__ = len(_UpperCAmelCase ) UpperCAmelCase__ = num_heads UpperCAmelCase__ = window_size UpperCAmelCase__ = mlp_ratio UpperCAmelCase__ = qkv_bias UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = drop_path_rate UpperCAmelCase__ = hidden_act UpperCAmelCase__ = use_absolute_embeddings UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCAmelCase__ = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) ) UpperCAmelCase__ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(_UpperCAmelCase ) + 1 )] UpperCAmelCase__ , UpperCAmelCase__ = get_aligned_output_features_output_indices( out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
346
'''simple docstring''' from math import factorial def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int = 20 ): '''simple docstring''' UpperCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCAmelCase__ = n // 2 return int(factorial(SCREAMING_SNAKE_CASE__ ) / (factorial(SCREAMING_SNAKE_CASE__ ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: UpperCAmelCase_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
346
1
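# The solution above counts the monotonic lattice paths through an n x n grid,
# which is the central binomial coefficient C(2n, n). A worked check with
# math.comb (Python 3.8+) against the classic n = 20 answer:
from math import comb

assert comb(40, 20) == 137_846_528_820  # equals solution(20) above
print(comb(40, 20))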
'''simple docstring''' import argparse from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--original_config_file', type=str, required=True, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None`, the number of input channels will be automatically inferred.', ) parser.add_argument( '--image_size', default=5_12, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') def lowerCAmelCase_ ( snake_case_ : str ) -> Any: '''simple docstring''' if string == "True": return True elif string == "False": return False else: raise ValueError(f"""could not parse string as bool {string}""" ) parser.add_argument( '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool ) parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int) SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args() SCREAMING_SNAKE_CASE_: Any =download_controlnet_from_original_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, extract_ema=args.extract_ema, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, use_linear_projection=args.use_linear_projection, cross_attention_dim=args.cross_attention_dim, ) controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
106
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib SCREAMING_SNAKE_CASE_: List[str] =get_logger() SCREAMING_SNAKE_CASE_: Optional[dict] =None class __A ( TensorFormatter[Mapping, """jax.Array""", Mapping] ): def __init__(self : List[Any] , __a : Optional[int]=None , __a : Any=None , **__a : Dict ): super().__init__(features=__a ) import jax from jaxlib.xla_client import Device if isinstance(__a , __a ): raise ValueError( f"""Expected {device} to be a `str` not {type(__a )}, as `jaxlib.xla_extension.Device` """ "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." ) UpperCAmelCase_ = device if isinstance(__a , __a ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: UpperCAmelCase_ = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f"""Device with string identifier {self.device} not listed among the available """ f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """ f"""device: {str(jax.devices()[0] )}.""" ) UpperCAmelCase_ = str(jax.devices()[0] ) UpperCAmelCase_ = jnp_array_kwargs @staticmethod def _lowercase (): import jax return {str(__a ): device for device in jax.devices()} def _lowercase (self : str , __a : Tuple ): import jax import jax.numpy as jnp if isinstance(__a , __a ) and column: if all( isinstance(__a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__a , axis=0 ) return column def _lowercase (self : Any , __a : Optional[int] ): import jax import jax.numpy as jnp if isinstance(__a , (str, bytes, type(__a )) ): return value elif isinstance(__a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() UpperCAmelCase_ = {} if isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: UpperCAmelCase_ = {"dtype": jnp.intaa} else: UpperCAmelCase_ = {"dtype": jnp.intaa} elif isinstance(__a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): UpperCAmelCase_ = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__a , PIL.Image.Image ): UpperCAmelCase_ = np.asarray(__a ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: UpperCAmelCase_ = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__a , **{**default_dtype, **self.jnp_array_kwargs} ) def _lowercase 
(self : int , __a : Any ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__a , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__a , "__array__" ) and not isinstance(__a , jax.Array ): UpperCAmelCase_ = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__a , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] ) elif isinstance(__a , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__a ) for substruct in data_struct] ) return self._tensorize(__a ) def _lowercase (self : Union[str, Any] , __a : dict ): return map_nested(self._recursive_tensorize , __a , map_list=__a ) def _lowercase (self : str , __a : pa.Table ): UpperCAmelCase_ = self.numpy_arrow_extractor().extract_row(__a ) UpperCAmelCase_ = self.python_features_decoder.decode_row(__a ) return self.recursive_tensorize(__a ) def _lowercase (self : Tuple , __a : pa.Table ): UpperCAmelCase_ = self.numpy_arrow_extractor().extract_column(__a ) UpperCAmelCase_ = self.python_features_decoder.decode_column(__a , pa_table.column_names[0] ) UpperCAmelCase_ = self.recursive_tensorize(__a ) UpperCAmelCase_ = self._consolidate(__a ) return column def _lowercase (self : str , __a : pa.Table ): UpperCAmelCase_ = self.numpy_arrow_extractor().extract_batch(__a ) UpperCAmelCase_ = self.python_features_decoder.decode_batch(__a ) UpperCAmelCase_ = self.recursive_tensorize(__a ) for column_name in batch: UpperCAmelCase_ = self._consolidate(batch[column_name] ) return batch
106
1
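# The formatter above chooses the default integer dtype from jax's 64-bit flag
# (the mangled `jnp.intaa` names stand in for int32/int64 in the original
# source). A minimal sketch of that rule, assuming a standard jax install:
import jax
import jax.numpy as jnp

default_int_dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
print(default_int_dtype)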
"""simple docstring""" a : Tuple = ''' # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git ''' a : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] a : Union[str, Any] = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
105
from torch import nn class a ( nn.Module ): def __init__( self :Tuple ,__lowercase :Optional[int] ,__lowercase :int ): super().__init__() snake_case__ : Optional[Any] = class_size snake_case__ : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) snake_case__ : Dict = nn.Linear(__lowercase ,__lowercase ) def __lowerCamelCase ( self :str ,__lowercase :int ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) snake_case__ : Optional[Any] = self.mlp(__lowercase ) return logits
230
0
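# The discriminator head above reduces to a single linear projection from LM
# hidden states to class logits (the two-layer MLP variants are commented out).
# A self-contained usage sketch with illustrative sizes, not from the source:
import torch
from torch import nn

embed_size, class_size = 768, 4
head = nn.Linear(embed_size, class_size)
hidden_state = torch.randn(2, embed_size)  # e.g. mean-pooled hidden states
logits = head(hidden_state)
print(logits.shape)  # torch.Size([2, 4])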
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowerCAmelCase__ : List[Any] =(720, 1280) # Height, Width lowerCAmelCase__ : Union[str, Any] =(0.4, 0.6) # if height or width lower than this scale, drop it. lowerCAmelCase__ : Union[str, Any] =1 / 100 lowerCAmelCase__ : int ='''''' lowerCAmelCase__ : List[Any] ='''''' lowerCAmelCase__ : List[str] ='''''' lowerCAmelCase__ : str =250 def __lowercase ( ) -> None: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_dataset(a__ , a__ ) for index in range(a__ ): __SCREAMING_SNAKE_CASE = random.sample(range(len(a__ ) ) , 4 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = update_image_and_anno( a__ , a__ , a__ , a__ , a__ , filter_scale=a__ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __SCREAMING_SNAKE_CASE = random_chars(32 ) __SCREAMING_SNAKE_CASE = path.split(os.sep )[-1].rsplit('.' , 1 )[0] __SCREAMING_SNAKE_CASE = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(f"""{file_root}.jpg""" , a__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) __SCREAMING_SNAKE_CASE = [] for anno in new_annos: __SCREAMING_SNAKE_CASE = anno[3] - anno[1] __SCREAMING_SNAKE_CASE = anno[4] - anno[2] __SCREAMING_SNAKE_CASE = anno[1] + width / 2 __SCREAMING_SNAKE_CASE = anno[2] + height / 2 __SCREAMING_SNAKE_CASE = f"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(a__ ) with open(f"""{file_root}.txt""" , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def __lowercase ( a__ , a__ ) -> tuple[list, list]: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for label_file in glob.glob(os.path.join(a__ , '*.txt' ) ): __SCREAMING_SNAKE_CASE = label_file.split(os.sep )[-1].rsplit('.' 
, 1 )[0] with open(a__ ) as in_file: __SCREAMING_SNAKE_CASE = in_file.readlines() __SCREAMING_SNAKE_CASE = os.path.join(a__ , f"""{label_name}.jpg""" ) __SCREAMING_SNAKE_CASE = [] for obj_list in obj_lists: __SCREAMING_SNAKE_CASE = obj_list.rstrip('\n' ).split(' ' ) __SCREAMING_SNAKE_CASE = float(obj[1] ) - float(obj[3] ) / 2 __SCREAMING_SNAKE_CASE = float(obj[2] ) - float(obj[4] ) / 2 __SCREAMING_SNAKE_CASE = float(obj[1] ) + float(obj[3] ) / 2 __SCREAMING_SNAKE_CASE = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(a__ ) labels.append(a__ ) return img_paths, labels def __lowercase ( a__ , a__ , a__ , a__ , a__ , a__ = 0.0 , ) -> tuple[list, list, str]: __SCREAMING_SNAKE_CASE = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __SCREAMING_SNAKE_CASE = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __SCREAMING_SNAKE_CASE = int(scale_x * output_size[1] ) __SCREAMING_SNAKE_CASE = int(scale_y * output_size[0] ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for i, index in enumerate(a__ ): __SCREAMING_SNAKE_CASE = all_img_list[index] path_list.append(a__ ) __SCREAMING_SNAKE_CASE = all_annos[index] __SCREAMING_SNAKE_CASE = cva.imread(a__ ) if i == 0: # top-left __SCREAMING_SNAKE_CASE = cva.resize(a__ , (divid_point_x, divid_point_y) ) __SCREAMING_SNAKE_CASE = img for bbox in img_annos: __SCREAMING_SNAKE_CASE = bbox[1] * scale_x __SCREAMING_SNAKE_CASE = bbox[2] * scale_y __SCREAMING_SNAKE_CASE = bbox[3] * scale_x __SCREAMING_SNAKE_CASE = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __SCREAMING_SNAKE_CASE = cva.resize(a__ , (output_size[1] - divid_point_x, divid_point_y) ) __SCREAMING_SNAKE_CASE = img for bbox in img_annos: __SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x) __SCREAMING_SNAKE_CASE = bbox[2] * scale_y __SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x) __SCREAMING_SNAKE_CASE = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __SCREAMING_SNAKE_CASE = cva.resize(a__ , (divid_point_x, output_size[0] - divid_point_y) ) __SCREAMING_SNAKE_CASE = img for bbox in img_annos: __SCREAMING_SNAKE_CASE = bbox[1] * scale_x __SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y) __SCREAMING_SNAKE_CASE = bbox[3] * scale_x __SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __SCREAMING_SNAKE_CASE = cva.resize( a__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __SCREAMING_SNAKE_CASE = img for bbox in img_annos: __SCREAMING_SNAKE_CASE = scale_x + bbox[1] * (1 - scale_x) __SCREAMING_SNAKE_CASE = scale_y + bbox[2] * (1 - scale_y) __SCREAMING_SNAKE_CASE = scale_x + bbox[3] * (1 - scale_x) __SCREAMING_SNAKE_CASE = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __SCREAMING_SNAKE_CASE = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __lowercase ( a__ ) -> str: assert number_char > 1, "The number of character should greater than 1" __SCREAMING_SNAKE_CASE = ascii_lowercase + digits return "".join(random.choice(a__ ) for _ in range(a__ ) ) if __name__ == "__main__": 
main() print('''DONE ✅''')
359
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ : Optional[int] = ['''speech'''] def __init__( self , *_A , **_A ): '''simple docstring''' requires_backends(self , ['speech'] ) class UpperCAmelCase_ ( metaclass=UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ : Optional[int] = ['''speech'''] def __init__( self , *_A , **_A ): '''simple docstring''' requires_backends(self , ['speech'] )
118
0
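# The dummy classes above follow the standard optional-backend placeholder
# pattern: the import always succeeds, but instantiating the placeholder raises
# a helpful error. A generic sketch of the same idea (names illustrative):
class RequiresSpeechBackend:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "This class requires the `speech` backend; install the missing "
            "dependencies to use it."
        )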
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A_ : Tuple = { """configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""], """tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[Any] = [ """TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """AdaptiveEmbedding""", """TransfoXLForSequenceClassification""", """TransfoXLLMHeadModel""", """TransfoXLModel""", """TransfoXLPreTrainedModel""", """load_tf_weights_in_transfo_xl""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[Any] = [ """TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAdaptiveEmbedding""", """TFTransfoXLForSequenceClassification""", """TFTransfoXLLMHeadModel""", """TFTransfoXLMainLayer""", """TFTransfoXLModel""", """TFTransfoXLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys A_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
215
'''simple docstring''' from numpy import exp, pi, sqrt def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , lowerCAmelCase_ = 1.0 )-> int: '''simple docstring''' return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
215
1
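# A worked check of the density above: at x = mu the exponential term is 1, so
# the value is 1 / sqrt(2 * pi * sigma**2); the standard normal therefore peaks
# at roughly 0.3989.
from math import pi, sqrt

print(round(1 / sqrt(2 * pi), 4))  # 0.3989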
'''simple docstring''' import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def lowerCamelCase__ ( __lowerCamelCase : ndarray ): return np.dot(__lowerCamelCase , __lowerCamelCase ) class __magic_name__ : def __init__( self , *, snake_case = np.inf , snake_case = "linear" , snake_case = 0.0 , ) -> Dict: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =regularization _UpperCAmelCase : Optional[int] =gamma if kernel == "linear": _UpperCAmelCase : Union[str, Any] =self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError('rbf kernel requires gamma') if not isinstance(self.gamma , (float, int)): raise ValueError('gamma must be float or int') if not self.gamma > 0: raise ValueError('gamma must be > 0') _UpperCAmelCase : List[str] =self.__rbf # in the future, there could be a default value like in sklearn # sklearn: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: _UpperCAmelCase : Any =f"Unknown kernel: {kernel}" raise ValueError(__A) def lowerCAmelCase ( self , snake_case , snake_case) -> Union[str, Any]: '''simple docstring''' return np.dot(__A , __A) def lowerCAmelCase ( self , snake_case , snake_case) -> int: '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def lowerCAmelCase ( self , snake_case , snake_case) -> Tuple: '''simple docstring''' _UpperCAmelCase : str =observations _UpperCAmelCase : Optional[int] =classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations (_UpperCAmelCase ) : str =np.shape(__A) def to_minimize(snake_case) -> float: _UpperCAmelCase : Dict =0 (_UpperCAmelCase ) : List[Any] =np.shape(__A) for i in range(__A): for j in range(__A): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(__A) _UpperCAmelCase : List[str] =LinearConstraint(__A , 0 , 0) _UpperCAmelCase : Any =Bounds(0 , self.regularization) _UpperCAmelCase : str =minimize( __A , np.ones(__A) , bounds=__A , constraints=[ly_contraint]).x _UpperCAmelCase : List[str] =l_star # calculating mean offset of separation plane to points _UpperCAmelCase : Tuple =0 for i in range(__A): for j in range(__A): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) _UpperCAmelCase : List[str] =s / n def lowerCAmelCase ( self , snake_case) -> str: '''simple docstring''' _UpperCAmelCase : str =sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , __A) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
361
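# A worked check of the RBF kernel defined in the SVM class above:
# k(x, z) = exp(-gamma * ||x - z||^2).
import numpy as np

x, z, gamma = np.array([1.0, 0.0]), np.array([0.0, 0.0]), 0.5
print(float(np.exp(-gamma * np.dot(x - z, x - z))))  # exp(-0.5) ~= 0.6065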
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase ={ 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase =[ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase =[ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowercase =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
242
0
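# The lazy `_import_structure` pattern above defers heavy submodule imports
# until first attribute access. A tiny sketch of the same idea using PEP 562
# module-level __getattr__ (module and symbol names here are illustrative):
import importlib

_import_structure = {"configuration_x": ["XConfig"], "modeling_x": ["XModel"]}

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")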
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : str = logging.get_logger(__name__) __lowerCAmelCase : Dict = '▁' __lowerCAmelCase : Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model'} __lowerCAmelCase : Union[str, Any] = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), } } __lowerCAmelCase : Union[str, Any] = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off __lowerCAmelCase : Optional[Any] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = ["""input_ids""", """attention_mask"""] a__ = [] a__ = [] def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Dict="<unk>" , UpperCamelCase__ : Tuple="<pad>" , UpperCamelCase__ : str="<mask>" , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[Dict[str, Any]] = None , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : int , ) -> List[Any]: """simple docstring""" __magic_name__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token __magic_name__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , ) __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase__ ) ) __magic_name__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __magic_name__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __magic_name__ = 1 __magic_name__ = len(self.sp_model ) __magic_name__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(UpperCamelCase__ ) } __magic_name__ = {v: k for k, v in self.lang_code_to_id.items()} __magic_name__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __magic_name__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __magic_name__ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) __magic_name__ = src_lang if src_lang is not None else """en_XX""" __magic_name__ = self.lang_code_to_id[self._src_lang] __magic_name__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : int ) -> int: """simple docstring""" __magic_name__ = self.__dict__.copy() __magic_name__ = None __magic_name__ = self.sp_model.serialized_model_proto() return state def __setstate__( self : Tuple , UpperCamelCase__ : int ) -> Dict: """simple docstring""" __magic_name__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __magic_name__ = {} __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _lowercase ( self : Dict ) -> str: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def _lowercase ( self : Optional[int] , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) __magic_name__ = [1] * len(self.prefix_tokens ) __magic_name__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(UpperCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(UpperCamelCase__ )) + ([0] * len(UpperCamelCase__ )) + suffix_ones def _lowercase ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __magic_name__ = [self.sep_token_id] __magic_name__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + 
sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : int ) -> List[Any]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __magic_name__ = src_lang __magic_name__ = self(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = self.convert_tokens_to_ids(UpperCamelCase__ ) __magic_name__ = tgt_lang_id return inputs def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" __magic_name__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def _lowercase ( self : Dict , UpperCamelCase__ : Optional[int] ) -> List[str]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __magic_name__ = self.sp_model.PieceToId(UpperCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowercase ( self : Any , UpperCamelCase__ : List[Any] ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowercase ( self : Any , UpperCamelCase__ : Tuple ) -> str: """simple docstring""" __magic_name__ = """""".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , """wb""" ) as fi: __magic_name__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str = "en_XX" , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "ro_RO" , **UpperCamelCase__ : int , ) -> BatchEncoding: """simple docstring""" __magic_name__ = src_lang __magic_name__ = tgt_lang return super().prepare_seqaseq_batch(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = self.lang_code_to_id[src_lang] __magic_name__ = [] __magic_name__ = 
[self.eos_token_id, self.cur_lang_code] def _lowercase ( self : Tuple , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = self.lang_code_to_id[lang] __magic_name__ = [] __magic_name__ = [self.eos_token_id, self.cur_lang_code]
88
import re import string import numpy as np import datasets __lowerCAmelCase : Optional[int] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' __lowerCAmelCase : Optional[int] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' __lowerCAmelCase : Optional[int] = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""":
datasets.Value("""string""" , id="""sequence""" ), } ) , reference_urls=[] , ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : int=False , UpperCamelCase__ : Tuple=False , ) -> Dict: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: __magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in predictions] ) __magic_name__ = np.array([re.sub(UpperCamelCase__ , """""" , UpperCamelCase__ ) for x in references] ) else: __magic_name__ = np.asarray(UpperCamelCase__ ) __magic_name__ = np.asarray(UpperCamelCase__ ) if ignore_case: __magic_name__ = np.char.lower(UpperCamelCase__ ) __magic_name__ = np.char.lower(UpperCamelCase__ ) if ignore_punctuation: __magic_name__ = string.punctuation.maketrans("""""" , """""" , string.punctuation ) __magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) __magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) if ignore_numbers: __magic_name__ = string.digits.maketrans("""""" , """""" , string.digits ) __magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) __magic_name__ = np.char.translate(UpperCamelCase__ , table=UpperCamelCase__ ) __magic_name__ = predictions == references return {"exact_match": np.mean(UpperCamelCase__ ) * 100}
88
1
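# The core computation of the metric above is elementwise string equality times
# 100. A dependency-light sketch reproducing the first docstring example:
import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])
print(np.mean(preds == refs) * 100)  # 25.0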
class _a : def __init__(self ) -> str: UpperCAmelCase_: Dict = 0 UpperCAmelCase_: Any = 0 UpperCAmelCase_: List[Any] = {} def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> str: if vertex not in self.adjacency: UpperCAmelCase_: Dict = {} self.num_vertices += 1 def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int: self.add_vertex(SCREAMING_SNAKE_CASE_ ) self.add_vertex(SCREAMING_SNAKE_CASE_ ) if head == tail: return UpperCAmelCase_: Optional[int] = weight UpperCAmelCase_: List[str] = weight def __snake_case (self ) -> Tuple: UpperCAmelCase_: str = self.get_edges() for edge in edges: UpperCAmelCase_: Optional[Any] = edge edges.remove((tail, head, weight) ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): UpperCAmelCase_: Union[str, Any] = list(edges[i] ) edges.sort(key=lambda SCREAMING_SNAKE_CASE_ : e[2] ) for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: UpperCAmelCase_: str = edges[i][2] + 1 for edge in edges: UpperCAmelCase_: List[Any] = edge UpperCAmelCase_: Dict = weight UpperCAmelCase_: Any = weight def __str__(self ) -> str: UpperCAmelCase_: int = """""" for tail in self.adjacency: for head in self.adjacency[tail]: UpperCAmelCase_: str = self.adjacency[head][tail] string += f'{head} -> {tail} == {weight}\n' return string.rstrip("""\n""" ) def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: int = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def __snake_case (self ) -> Optional[int]: return self.adjacency.keys() @staticmethod def __snake_case (SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = Graph() if vertices is None: UpperCAmelCase_: str = [] if edges is None: UpperCAmelCase_: Any = [] for vertex in vertices: g.add_vertex(SCREAMING_SNAKE_CASE_ ) for edge in edges: g.add_edge(*SCREAMING_SNAKE_CASE_ ) return g class _a : def __init__(self ) -> Any: UpperCAmelCase_: Optional[int] = {} UpperCAmelCase_: Union[str, Any] = {} def __len__(self ) -> List[Any]: return len(self.parent ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> List[Any]: if item in self.parent: return self.find(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = item UpperCAmelCase_: Optional[int] = 0 return item def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: if item not in self.parent: return self.make_set(SCREAMING_SNAKE_CASE_ ) if item != self.parent[item]: UpperCAmelCase_: Union[str, Any] = self.find(self.parent[item] ) return self.parent[item] def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: UpperCAmelCase_: Union[str, Any] = self.find(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[Any] = self.find(SCREAMING_SNAKE_CASE_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: UpperCAmelCase_: List[Any] = roota return roota if self.rank[roota] < self.rank[roota]: UpperCAmelCase_: Tuple = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 UpperCAmelCase_: List[Any] = roota return roota return None @staticmethod def __snake_case (SCREAMING_SNAKE_CASE_ ) -> Tuple: UpperCAmelCase_: Tuple = graph.num_vertices UpperCAmelCase_: List[str] = Graph.UnionFind() UpperCAmelCase_: Any = [] while num_components > 1: UpperCAmelCase_: List[str] = {} for vertex in graph.get_vertices(): UpperCAmelCase_: Optional[Any] = -1 UpperCAmelCase_: str = graph.get_edges() for edge in edges: 
UpperCAmelCase_: Any = edge edges.remove((tail, head, weight) ) for edge in edges: UpperCAmelCase_: List[Any] = edge UpperCAmelCase_: Union[str, Any] = union_find.find(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Union[str, Any] = union_find.find(SCREAMING_SNAKE_CASE_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCAmelCase_: Optional[Any] = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCAmelCase_: Union[str, Any] = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: UpperCAmelCase_: Any = cheap_edge[vertex] if union_find.find(SCREAMING_SNAKE_CASE_ ) != union_find.find(SCREAMING_SNAKE_CASE_ ): union_find.union(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) mst_edges.append(cheap_edge[vertex] ) UpperCAmelCase_: Optional[int] = num_components - 1 UpperCAmelCase_: Optional[Any] = Graph.build(edges=SCREAMING_SNAKE_CASE_ ) return mst
360
import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging a : Tuple = logging.get_logger(__name__) a : Optional[Any] = { 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class _a ( _lowerCAmelCase ): A = '''encodec''' def __init__(self, SCREAMING_SNAKE_CASE_=[1.5, 3.0, 6.0, 1_2.0, 2_4.0], SCREAMING_SNAKE_CASE_=24000, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=128, SCREAMING_SNAKE_CASE_=32, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=[8, 5, 4, 2], SCREAMING_SNAKE_CASE_="weight_norm", SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="reflect", SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=1.0, SCREAMING_SNAKE_CASE_=1024, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: UpperCAmelCase_: List[Any] = target_bandwidths UpperCAmelCase_: str = sampling_rate UpperCAmelCase_: Any = audio_channels UpperCAmelCase_: List[str] = normalize UpperCAmelCase_: List[Any] = chunk_length_s UpperCAmelCase_: List[Any] = overlap UpperCAmelCase_: Any = hidden_size UpperCAmelCase_: str = num_filters UpperCAmelCase_: Any = num_residual_layers UpperCAmelCase_: int = upsampling_ratios UpperCAmelCase_: Tuple = norm_type UpperCAmelCase_: Union[str, Any] = kernel_size UpperCAmelCase_: str = last_kernel_size UpperCAmelCase_: Union[str, Any] = residual_kernel_size UpperCAmelCase_: str = dilation_growth_rate UpperCAmelCase_: int = use_causal_conv UpperCAmelCase_: int = pad_mode UpperCAmelCase_: List[Any] = compress UpperCAmelCase_: Dict = num_lstm_layers UpperCAmelCase_: List[Any] = trim_right_ratio UpperCAmelCase_: List[Any] = codebook_size UpperCAmelCase_: List[Any] = codebook_dim if codebook_dim is not None else hidden_size UpperCAmelCase_: Optional[Any] = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**SCREAMING_SNAKE_CASE_ ) @property def __snake_case (self ) -> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __snake_case (self ) -> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1, int((1.0 - self.overlap) * self.chunk_length ) ) @property def __snake_case (self ) -> int: UpperCAmelCase_: Optional[int] = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def __snake_case (self ) -> int: return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
82
0
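For context, a minimal sketch (illustrative only, not one of the dataset records) of how the derived properties in the Encodec configuration above interact, assuming the public transformers.EncodecConfig API:

from transformers import EncodecConfig

# defaults shown explicitly: hop_length = prod(upsampling_ratios) = 8 * 5 * 4 * 2 = 320
config = EncodecConfig(sampling_rate=24_000, upsampling_ratios=[8, 5, 4, 2], chunk_length_s=1.0, overlap=0.5)

print(config.frame_rate)    # ceil(24000 / 320) = 75
print(config.chunk_length)  # int(chunk_length_s * sampling_rate) = 24000
print(config.chunk_stride)  # max(1, int((1.0 - overlap) * chunk_length)) = 12000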
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a__ : List[str] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowercase_ ( a__ ): __UpperCAmelCase = ['pixel_values'] def __init__( self , a = True , a = None , a = PILImageResampling.BICUBIC , a = True , a = None , a = True , a = 1 / 2_55 , a = True , a = None , a = None , a = True , **a , ): super().__init__(**a ) UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24} UpperCamelCase__ = get_size_dict(a , default_to_square=a ) UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} UpperCamelCase__ = get_size_dict(a , default_to_square=a , param_name="crop_size" ) UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = resample UpperCamelCase__ = do_center_crop UpperCamelCase__ = crop_size UpperCamelCase__ = do_rescale UpperCamelCase__ = rescale_factor UpperCamelCase__ = do_normalize UpperCamelCase__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCamelCase__ = image_std if image_std is not None else OPENAI_CLIP_STD UpperCamelCase__ = do_convert_rgb def __a ( self , a , a , a = PILImageResampling.BICUBIC , a = None , **a , ): UpperCamelCase__ = get_size_dict(a , default_to_square=a ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a ) return resize(a , size=a , resample=a , data_format=a , **a ) def __a ( self , a , a , a = None , **a , ): UpperCamelCase__ = get_size_dict(a ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a ) def __a ( self , a , a , a = None , **a , ): return rescale(a , scale=a , data_format=a , **a ) def __a ( self , a , a , a , a = None , **a , ): return normalize(a , mean=a , std=a , data_format=a , **a ) def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ): UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ = size if size is not None else self.size UpperCamelCase__ = get_size_dict(a , param_name="size" , default_to_square=a ) UpperCamelCase__ = resample if resample is not None else self.resample UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size UpperCamelCase__ = get_size_dict(a , param_name="crop_size" , default_to_square=a ) UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean UpperCamelCase__ = image_std if image_std is not None else self.image_std UpperCamelCase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCamelCase__ = make_list_of_images(a ) if not valid_images(a ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCamelCase__ = [convert_to_rgb(a ) for image in images] # All transformations expect numpy arrays. UpperCamelCase__ = [to_numpy_array(a ) for image in images] if do_resize: UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images] if do_center_crop: UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images] if do_rescale: UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images] if do_normalize: UpperCamelCase__ = [self.normalize(image=a , mean=a , std=a ) for image in images] UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images] UpperCamelCase__ = {"pixel_values": images} return BatchFeature(data=a , tensor_type=a )
80
"""Get the site emails from a URL."""
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests


class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor "#".
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    # Get the main domain name (example.com).
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    # Get the sub-domain name (sub.example.com).
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
234
0
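A short usage sketch for the CLIP-style image processor defined above (illustrative, not part of the dataset; assumes the public transformers.CLIPImageProcessor API):

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: resize shortest edge to 224, then 224x224 center crop
image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # a dummy 640x480 RGB image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): resized, cropped, rescaled, normalized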
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Optional[Any] = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__): _UpperCamelCase:Optional[Any] = ["input_values", "padding_mask"] def __init__( self , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 2_4000 , _SCREAMING_SNAKE_CASE = 0.0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , )-> int: super().__init__(feature_size=_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , padding_value=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) lowerCamelCase_ =chunk_length_s lowerCamelCase_ =overlap @property def _snake_case ( self )-> Optional[int]: if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _snake_case ( self )-> Optional[int]: if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )-> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs lowerCamelCase_ =True lowerCamelCase_ =bool( isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCamelCase_ =[np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ): lowerCamelCase_ =np.asarray(_SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowerCamelCase_ =raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowerCamelCase_ =[np.asarray(_SCREAMING_SNAKE_CASE ).T] # verify inputs are valid for idx, example in enumerate(_SCREAMING_SNAKE_CASE ): if example.ndim > 2: raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' ) lowerCamelCase_ =None lowerCamelCase_ =BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowerCamelCase_ =min(array.shape[0] for array in raw_audio ) lowerCamelCase_ =int(np.floor(max_length / self.chunk_stride ) ) lowerCamelCase_ =(nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowerCamelCase_ =max(array.shape[0] for array in raw_audio ) lowerCamelCase_ =int(np.ceil(max_length / self.chunk_stride ) ) lowerCamelCase_ =(nb_step - 1) * self.chunk_stride + self.chunk_length lowerCamelCase_ ="""max_length""" else: lowerCamelCase_ =input_values # normal padding on batch if padded_inputs is None: lowerCamelCase_ =self.pad( _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , ) if padding: lowerCamelCase_ =padded_inputs.pop("""attention_mask""" ) lowerCamelCase_ =[] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: lowerCamelCase_ =example[..., None] input_values.append(example.T ) lowerCamelCase_ =input_values if return_tensors is not None: lowerCamelCase_ =padded_inputs.convert_to_tensors(_SCREAMING_SNAKE_CASE ) return padded_inputs
49
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' __A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' __A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _SCREAMING_SNAKE_CASE ( datasets.Metric): def _snake_case ( self )-> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/krishnap25/mauve""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/krishnap25/mauve"""] , reference_urls=[ """https://arxiv.org/abs/2102.01454""", """https://github.com/krishnap25/mauve""", ] , ) def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="auto" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=500 , _SCREAMING_SNAKE_CASE="gpt2-large" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=1024 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=25 , )-> List[str]: lowerCamelCase_ =compute_mauve( p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , ) return out
49
1
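Sketch of how the feature extractor above packages raw audio (illustrative; assumes the public transformers.EncodecFeatureExtractor API and its default padding behaviour):

import numpy as np
from transformers import EncodecFeatureExtractor

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
audio = np.zeros(24_000, dtype=np.float32)  # one second of mono audio
inputs = extractor(audio, sampling_rate=24_000, return_tensors="np")
# mono input gains a channel axis in the loop at the end of __call__
print(inputs["input_values"].shape)  # (1, 1, 24000) -> (batch, channels, samples)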
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # no extra preprocess/forward/postprocess parameters
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # resize the raw depth map back to the original image size; PIL size is (width, height)
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
231
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __A = { "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __A = { "yjernite/retribert-base-uncased": 5_1_2, } __A = { "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class UpperCAmelCase (_UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :Dict = VOCAB_FILES_NAMES _UpperCAmelCase :str = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase :Optional[int] = PRETRAINED_INIT_CONFIGURATION _UpperCAmelCase :str = RetriBertTokenizer _UpperCAmelCase :List[str] = ["input_ids", "attention_mask"] def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase="[UNK]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[PAD]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__: Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , _UpperCAmelCase ) != do_lower_case or normalizer_state.get('''strip_accents''' , _UpperCAmelCase ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _UpperCAmelCase ) != tokenize_chinese_chars ): lowercase__: str = getattr(_UpperCAmelCase , normalizer_state.pop('''type''' ) ) lowercase__: Optional[Any] = do_lower_case lowercase__: Tuple = strip_accents lowercase__: str = tokenize_chinese_chars lowercase__: Union[str, Any] = normalizer_class(**_UpperCAmelCase ) lowercase__: List[str] = do_lower_case def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=None ): lowercase__: Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: Optional[int] = [self.sep_token_id] lowercase__: Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ): lowercase__: List[str] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase )
177
0
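A hedged usage sketch for the depth-estimation pipeline above (illustrative; the checkpoint name is just an example of a depth model on the Hub):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")       # PIL image built in postprocess()
print(result["predicted_depth"].shape)  # raw depth map as a torch tensor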
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=1_4 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=4 , lowercase=4 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=0.02 , ): """simple docstring""" A_ : Dict = parent A_ : Union[str, Any] = batch_size A_ : Tuple = seq_length A_ : Tuple = is_training A_ : Optional[Any] = use_input_mask A_ : Any = use_token_type_ids A_ : List[str] = use_labels A_ : List[str] = vocab_size A_ : int = hidden_size A_ : Any = rotary_dim A_ : List[Any] = num_hidden_layers A_ : Dict = num_attention_heads A_ : List[Any] = intermediate_size A_ : List[str] = hidden_act A_ : Dict = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : str = max_position_embeddings A_ : Optional[int] = initializer_range A_ : Tuple = None A_ : Optional[Any] = vocab_size - 1 A_ : Any = vocab_size - 1 A_ : Union[str, Any] = vocab_size - 1 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : List[str] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = self.prepare_config_and_inputs() A_ : List[Any] = config_and_inputs A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Tuple = 2_0 A_ : int = model_class_name(lowercase ) A_ : List[Any] = model.init_cache(input_ids.shape[0] , lowercase ) A_ : Tuple = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) A_ : List[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) A_ : Tuple = model( input_ids[:, :-1] , attention_mask=lowercase , past_key_values=lowercase , position_ids=lowercase , ) A_ : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) A_ : Tuple = model( input_ids[:, -1:] , attention_mask=lowercase , past_key_values=outputs_cache.past_key_values , position_ids=lowercase , ) A_ : Tuple = model(lowercase ) A_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - 
outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" A_ : List[Any] = 2_0 A_ : List[str] = model_class_name(lowercase ) A_ : int = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) A_ : Any = model.init_cache(input_ids.shape[0] , lowercase ) A_ : List[str] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) A_ : int = model( input_ids[:, :-1] , attention_mask=lowercase , past_key_values=lowercase , position_ids=lowercase , ) A_ : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) A_ : Optional[Any] = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase , position_ids=lowercase , ) A_ : str = model(lowercase , attention_mask=lowercase ) A_ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class UpperCAmelCase ( __lowercase , __lowercase , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowerCamelCase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else () def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = FlaxGPTJModelTester(self ) def lowerCAmelCase_ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowercase , lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowercase , lowercase , lowercase , lowercase ) @tooslow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) A_ : Dict = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowercase , truncation=lowercase ) A_ : Union[str, Any] = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) A_ : str = False A_ : Optional[Any] = model.config.eos_token_id A_ : Optional[Any] = jax.jit(model.generate ) A_ : Any = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences A_ : Optional[int] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) A_ : int = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm a little late to the party. 
I'm going to""", ] self.assertListEqual(lowercase , lowercase ) @is_pt_flax_cross_test def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) A_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A_ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning A_ : int = getattr(lowercase , lowercase ) A_ : Dict = pt_inputs["""input_ids"""].shape A_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase ): A_ : Tuple = 0 A_ : Union[str, Any] = 1 A_ : Any = 0 A_ : int = 1 A_ : Optional[Any] = pt_model_class(lowercase ).eval() A_ : List[str] = model_class(lowercase , dtype=jnp.floataa ) A_ : Optional[int] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase ) A_ : Tuple = fx_state with torch.no_grad(): A_ : Tuple = pt_model(**lowercase ).to_tuple() A_ : Dict = fx_model(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase , lowercase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase ) A_ : int = model_class.from_pretrained(lowercase , from_pt=lowercase ) A_ : Tuple = fx_model_loaded(**lowercase ).to_tuple() self.assertEqual( len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(lowercase , lowercase ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs A_ : Any = self._prepare_for_class(lowercase , lowercase ) A_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class A_ : Optional[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning A_ : int = getattr(lowercase , lowercase ) A_ : Tuple = pt_model_class(lowercase ).eval() A_ : Union[str, Any] = model_class(lowercase , dtype=jnp.floataa ) A_ : Union[str, Any] = load_flax_weights_in_pytorch_model(lowercase , fx_model.params ) A_ : Any = pt_inputs["""input_ids"""].shape A_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(lowercase ): A_ : Dict = 0 A_ : str = 1 A_ : Tuple = 0 A_ : Union[str, Any] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): A_ : str = pt_model(**lowercase ).to_tuple() A_ : List[Any] = fx_model(**lowercase ).to_tuple() self.assertEqual(len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase , lowercase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase ) A_ : Optional[int] = pt_model_class.from_pretrained(lowercase , from_flax=lowercase ) with torch.no_grad(): A_ : str = 
pt_model_loaded(**lowercase ).to_tuple() self.assertEqual( len(lowercase ) , len(lowercase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(lowercase , lowercase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def lowerCAmelCase_ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: A_ : Any = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) A_ : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowercase )
363
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name _UpperCAmelCase = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = 42 class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" super().__init__() self.register_modules( prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" if latents is None: A_ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) A_ : Optional[int] = latents.to(lowercase ) A_ : List[Any] = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase_ ( self , lowercase=0 ): """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) A_ : Tuple = torch.device(F'''cuda:{gpu_id}''' ) A_ : Dict = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase , lowercase ) @property def lowerCAmelCase_ ( self ): """simple docstring""" if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ): return self.device for module in self.image_encoder.modules(): if ( hasattr(lowercase , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ): A_ : Tuple = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 ) if not isinstance(lowercase , torch.Tensor ): A_ : Dict = self.image_processor(lowercase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 ) A_ : List[str] = image.to(dtype=self.image_encoder.dtype , device=lowercase ) A_ : 
Tuple = self.image_encoder(lowercase )['last_hidden_state'] A_ : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 A_ : List[str] = image_embeds.repeat_interleave(lowercase , dim=0 ) if do_classifier_free_guidance: A_ : str = torch.zeros_like(lowercase ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes A_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(lowercase ) def __call__( self , lowercase , lowercase = 1 , lowercase = 2_5 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 6_4 , lowercase = "pil" , lowercase = True , ): """simple docstring""" if isinstance(lowercase , PIL.Image.Image ): A_ : int = 1 elif isinstance(lowercase , torch.Tensor ): A_ : int = image.shape[0] elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): A_ : List[str] = len(lowercase ) else: raise ValueError( F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}''' ) A_ : Any = self._execution_device A_ : List[Any] = batch_size * num_images_per_prompt A_ : int = guidance_scale > 1.0 A_ : Optional[int] = self._encode_image(lowercase , lowercase , lowercase , lowercase ) # prior self.scheduler.set_timesteps(lowercase , device=lowercase ) A_ : Dict = self.scheduler.timesteps A_ : int = self.prior.config.num_embeddings A_ : int = self.prior.config.embedding_dim A_ : Dict = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim A_ : Union[str, Any] = latents.reshape(latents.shape[0] , lowercase , lowercase ) for i, t in enumerate(self.progress_bar(lowercase ) ): # expand the latents if we are doing classifier free guidance A_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A_ : List[Any] = self.scheduler.scale_model_input(lowercase , lowercase ) A_ : Any = self.prior( lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding # remove the variance A_ , A_ : int = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: A_ , A_ : List[Any] = noise_pred.chunk(2 ) A_ : str = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) A_ : Optional[int] = self.scheduler.step( lowercase , timestep=lowercase , sample=lowercase , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=lowercase ) A_ : str = [] for i, latent in enumerate(lowercase ): print() A_ : Optional[Any] = self.renderer.decode( latent[None, :] , lowercase , size=lowercase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(lowercase ) A_ : Dict = torch.stack(lowercase ) if output_type not in ["np", "pil"]: raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) A_ : Dict = images.cpu().numpy() if output_type == "pil": A_ : str = [self.numpy_to_pil(lowercase ) for image in images] # Offload last model to CPU if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None: 
self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=lowercase )
192
0
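A condensed sketch of the cache-based incremental decoding that the check_use_cache_forward test above exercises, on a small randomly initialised model (sizes and names are illustrative; assumes the Flax model API used in the test):

import jax.numpy as jnp
from transformers import GPTJConfig, FlaxGPTJForCausalLM

config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, rotary_dim=4, n_positions=64)
model = FlaxGPTJForCausalLM(config)  # random weights, no checkpoint download

max_length = 20
input_ids = jnp.ones((1, 8), dtype="i4")
attention_mask = jnp.ones((1, max_length), dtype="i4")
past = model.init_cache(1, max_length)

# prefill on all but the last token, then feed the last token against the cache
position_ids = jnp.arange(input_ids.shape[-1] - 1)[None, :]
out = model(input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past, position_ids=position_ids)
out = model(
    input_ids[:, -1:],
    attention_mask=attention_mask,
    past_key_values=out.past_key_values,
    position_ids=jnp.array([[input_ids.shape[-1] - 1]], dtype="i4"),
)
# out.logits[:, -1] should match a full (uncached) forward pass to ~1e-3, as the test asserts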
import os

import pytest

from transformers.dynamic_module_utils import get_imports

TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
48
"""Get book and author data from https://openlibrary.org"""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
145
0
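A quick illustration (not part of the dataset) of the behaviour the test above asserts: get_imports() reports a module's top-level requirements but skips imports guarded by try/except:

import os
import tempfile

from transformers.dynamic_module_utils import get_imports

source = "import os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "test_file.py")
    with open(path, "w") as f:
        f.write(source)
    print(get_imports(path))  # ['os'] -- 'bar' is treated as an optional dependency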
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any positive power is 0
        return 0
    if y == 0:  # any non-zero number raised to 0 is 1
        return 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
348
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = is_training __lowerCamelCase = use_auxiliary_loss __lowerCamelCase = num_queries __lowerCamelCase = num_channels __lowerCamelCase = min_size __lowerCamelCase = max_size __lowerCamelCase = num_labels __lowerCamelCase = hidden_dim __lowerCamelCase = hidden_dim def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCamelCase__ ) __lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ ) __lowerCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5 ).float() __lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long() __lowerCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerConfig( hidden_size=self.hidden_dim , ) __lowerCamelCase = self.num_queries __lowerCamelCase = self.num_labels __lowerCamelCase = [1, 1, 1, 1] __lowerCamelCase = self.num_channels __lowerCamelCase = 64 __lowerCamelCase = 128 __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim __lowerCamelCase = self.hidden_dim return config def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs() __lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask} return config, inputs_dict def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = output.encoder_hidden_states __lowerCamelCase = output.pixel_decoder_hidden_states __lowerCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) 
model.eval() __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() def comm_check_on_output(lowerCamelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): __lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ ) __lowerCamelCase = model(lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) __lowerCamelCase = model( pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) comm_check_on_output(lowerCamelCase__ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () snake_case_ = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} snake_case_ = False snake_case_ = False snake_case_ = False snake_case_ = False def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase = MaskaFormerModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ ) def lowercase_ ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ ) @unittest.skip(reason='Mask2Former does not use inputs_embeds' ) def lowercase_ ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' ) def lowercase_ ( self ) 
-> Tuple: '''simple docstring''' pass @unittest.skip(reason='Mask2Former is not a generative model' ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Mask2Former does not use token embeddings' ) def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def lowercase_ ( self ) -> Dict: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowercase_ ( self ) -> List[str]: '''simple docstring''' pass def lowercase_ ( self ) -> List[str]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ) __lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase = [*signature.parameters.keys()] __lowerCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) @slow def lowercase_ ( self ) -> int: '''simple docstring''' for model_name in ["facebook/mask2former-swin-small-coco-instance"]: __lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase = (self.model_tester.min_size,) * 2 __lowerCamelCase = { 'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ), 'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ), 'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(), } __lowerCamelCase = self.model_tester.get_config() __lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None ) def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) __lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ ) self.assertTrue(outputs.attentions is not None ) def lowercase_ ( self ) -> Optional[Any]: '''simple docstring''' if not self.model_tester.is_training: return __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = model_class(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss loss.backward() def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.all_model_classes[1] __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs() __lowerCamelCase = True __lowerCamelCase = True __lowerCamelCase = 
model_class(lowerCamelCase__ ).to(lowerCamelCase__ ) model.train() __lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ) __lowerCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() __lowerCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() __lowerCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCamelCase__ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1e-4 def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" __lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def lowercase_ ( self ) -> List[str]: '''simple docstring''' return "facebook/mask2former-swin-small-coco-instance" @cached_property def lowercase_ ( self ) -> Dict: '''simple docstring''' return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ) __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) __lowerCamelCase = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) __lowerCamelCase = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = prepare_img() __lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ ) __lowerCamelCase = inputs['pixel_values'].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) ) with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) # masks_queries_logits 
__lowerCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ) __lowerCamelCase = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] __lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) # class_queries_logits __lowerCamelCase = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) ) __lowerCamelCase = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) ) def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval() __lowerCamelCase = self.default_image_processor __lowerCamelCase = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , ) __lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ ) __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']] __lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']] with torch.no_grad(): __lowerCamelCase = model(**lowerCamelCase__ ) self.assertTrue(outputs.loss is not None )
348
1
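# A minimal, self-contained sketch of the retain_grad pattern used in the
# Mask2Former attention/hidden-state test above: keep a non-leaf activation's
# gradient around so a test can assert the backward pass actually reaches it.
# The toy module and tensor shapes here are illustrative assumptions, not
# part of the original test.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 4))
x = torch.randn(2, 8)

hidden = model[0](x)  # intermediate (non-leaf) activation
hidden.retain_grad()  # without this, .grad on a non-leaf tensor stays None
out = model[2](model[1](hidden))
out.sum().backward(retain_graph=True)

assert hidden.grad is not None  # mirrors self.assertIsNotNone(...grad)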
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
110
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _a ( UpperCamelCase__ , unittest.TestCase ): _lowercase : Union[str, Any] = KandinskyVaaImgaImgPipeline _lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image'''] _lowercase : Any = [ '''image_embeds''', '''negative_image_embeds''', '''image''', ] _lowercase : Union[str, Any] = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _lowercase : Optional[Any] = False @property def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict: """simple docstring""" return 32 @property def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" return 32 @property def lowerCamelCase_ ( self: Any ) -> Any: """simple docstring""" return self.time_input_dim @property def lowerCamelCase_ ( self: Tuple ) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]: """simple docstring""" return 100 @property def lowerCamelCase_ ( self: int ) -> int: """simple docstring""" torch.manual_seed(0 ) lowercase__ = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } lowercase__ = UNetaDConditionModel(**UpperCamelCase_ ) return model @property def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase_ ( self: Optional[Any] ) -> int: """simple docstring""" torch.manual_seed(0 ) lowercase__ = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]: """simple docstring""" lowercase__ = self.dummy_unet lowercase__ = self.dummy_movq lowercase__ = { '''num_train_timesteps''': 1_000, '''beta_schedule''': '''linear''', '''beta_start''': 0.00085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', 
'''thresholding''': False, } lowercase__ = DDIMScheduler(**UpperCamelCase_ ) lowercase__ = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=0 ) -> Optional[int]: """simple docstring""" lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase_ ) # create init_image lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase__ = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) ) if str(UpperCamelCase_ ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase_ ) else: lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowercase__ = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def lowerCamelCase_ ( self: Optional[int] ) -> Dict: """simple docstring""" lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase_ ) lowercase__ = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) lowercase__ = output.images lowercase__ = pipe( **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0] lowercase__ = image[0, -3:, -3:, -1] lowercase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: str ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]: """simple docstring""" lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) lowercase__ = '''A red cartoon frog, 4k''' lowercase__ = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase_ ) lowercase__ = KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) lowercase__ = pipeline.to(UpperCamelCase_ ) pipeline.set_progress_bar_config(disable=UpperCamelCase_ ) lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase__ , lowercase__ = pipe_prior( UpperCamelCase_ , 
generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() lowercase__ = pipeline( image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) lowercase__ = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
110
1
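# The CTRL tokenizer test above expects "react" to split into
# ["re@@", "a@@", "c@@", "t"], where "@@" marks a non-final subword. A tiny
# sketch of how such tokens are joined back into words (illustrative only;
# the real CTRLTokenizer handles this internally):
def detokenize_bpe(tokens):
    """Join subword tokens, removing '@@' continuation markers."""
    return "".join(t[:-2] if t.endswith("@@") else t + " " for t in tokens).strip()


assert detokenize_bpe(["adapt", "re@@", "a@@", "c@@", "t"]) == "adapt react"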
"""simple docstring""" import os _a = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 1_00, """D""": 5_00, """M""": 10_00} def _A ( UpperCamelCase_ : Optional[int]) -> Optional[Any]: '''simple docstring''' __lowercase = 0 __lowercase = 0 while index < len(_A) - 1: __lowercase = SYMBOLS[numerals[index]] __lowercase = SYMBOLS[numerals[index + 1]] if current_value < next_value: total_value -= current_value else: total_value += current_value index += 1 total_value += SYMBOLS[numerals[index]] return total_value def _A ( UpperCamelCase_ : str) -> Dict: '''simple docstring''' __lowercase = '' __lowercase = num // 1000 numerals += m_count * "M" num %= 1000 __lowercase = num // 100 if c_count == 9: numerals += "CM" c_count -= 9 elif c_count == 4: numerals += "CD" c_count -= 4 if c_count >= 5: numerals += "D" c_count -= 5 numerals += c_count * "C" num %= 100 __lowercase = num // 10 if x_count == 9: numerals += "XC" x_count -= 9 elif x_count == 4: numerals += "XL" x_count -= 4 if x_count >= 5: numerals += "L" x_count -= 5 numerals += x_count * "X" num %= 10 if num == 9: numerals += "IX" num -= 9 elif num == 4: numerals += "IV" num -= 4 if num >= 5: numerals += "V" num -= 5 numerals += num * "I" return numerals def _A ( UpperCamelCase_ : List[str] = "/p089_roman.txt") -> Dict: '''simple docstring''' __lowercase = 0 with open(os.path.dirname(_A) + roman_numerals_filename) as filea: __lowercase = filea.readlines() for line in lines: __lowercase = line.strip() __lowercase = parse_roman_numerals(_A) __lowercase = generate_roman_numerals(_A) savings += len(_A) - len(_A) return savings if __name__ == "__main__": print(F"{solution() = }")
353
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
144
0
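# Quick round-trip check for the Project Euler 89 helpers above (assumes
# parse_roman_numerals and generate_roman_numerals are in scope): parsing a
# non-minimal numeral and regenerating it should shorten it.
assert parse_roman_numerals("IIIIIIIIIIIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"
assert parse_roman_numerals("XVI") == 16
# "IIIIIIIIIIIIIIII" (16 chars) -> "XVI" (3 chars): a saving of 13 characters.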
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline __UpperCamelCase : Optional[int] = { '''n_samples''': 6_4, '''horizon''': 3_2, '''num_inference_steps''': 2_0, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": __UpperCamelCase : Optional[int] = '''hopper-medium-v2''' __UpperCamelCase : Optional[int] = gym.make(env_name) __UpperCamelCase : List[Any] = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) __UpperCamelCase : List[Any] = env.reset() __UpperCamelCase : Optional[Any] = 0 __UpperCamelCase : Any = 0 __UpperCamelCase : Any = 1_0_0_0 __UpperCamelCase : Dict = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy __UpperCamelCase : Optional[int] = pipeline(obs, planning_horizon=3_2) # execute action in environment __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Optional[Any] = env.step(denorm_actions) __UpperCamelCase : Optional[Any] = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:''' F''' {total_score}''' ) # save observations for rendering rollout.append(next_observation.copy()) __UpperCamelCase : str = next_observation except KeyboardInterrupt: pass print(F'''Total reward: {total_reward}''')
106
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput __UpperCamelCase : Optional[Any] = '''scheduler_config.json''' class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 1 lowercase__ = 2 lowercase__ = 3 lowercase__ = 4 lowercase__ = 5 @dataclass class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 42 class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = SCHEDULER_CONFIG_NAME lowercase__ = ["dtype"] lowercase__ = [] lowercase__ = True @classmethod def __lowerCAmelCase ( cls : List[Any] ,lowercase_ : Dict[str, Any] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[int]=False ,**lowercase_ : Any ,): lowerCAmelCase__ ,lowerCAmelCase__ : Dict = cls.load_config( pretrained_model_name_or_path=lowercase_ ,subfolder=lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ,) lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = cls.from_config(lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ) if hasattr(lowercase_ ,'''create_state''' ) and getattr(lowercase_ ,'''has_state''' ,lowercase_ ): lowerCAmelCase__ : List[Any] = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = False ,**lowercase_ : str ): self.save_config(save_directory=lowercase_ ,push_to_hub=lowercase_ ,**lowercase_ ) @property def __lowerCAmelCase ( self : List[str] ): return self._get_compatibles() @classmethod def __lowerCAmelCase ( cls : List[Any] ): lowerCAmelCase__ : Tuple = list(set([cls.__name__] + cls._compatibles ) ) lowerCAmelCase__ : Tuple = importlib.import_module(__name__.split('''.''' )[0] ) lowerCAmelCase__ : Union[str, Any] = [ getattr(lowercase_ ,lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ ,lowercase_ ) ] return compatible_classes def __SCREAMING_SNAKE_CASE ( A_ , A_ ): assert len(A_ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A_ ) - x.ndim) ) , A_ ) def __SCREAMING_SNAKE_CASE ( A_ , A_=0.999 , A_=jnp.floataa ): def alpha_bar(A_ ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 lowerCAmelCase__ : Optional[Any] = [] for i in range(A_ ): lowerCAmelCase__ : str = i / num_diffusion_timesteps lowerCAmelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(A_ ) / alpha_bar(A_ ) , A_ ) ) return jnp.array(A_ , dtype=A_ ) @flax.struct.dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = 42 lowercase__ = 42 lowercase__ = 42 @classmethod def __lowerCAmelCase ( cls : Union[str, Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Optional[int] = scheduler.config if config.trained_betas is not None: lowerCAmelCase__ : Any = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype ) elif config.beta_schedule == "linear": lowerCAmelCase__ : Union[str, Any] = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
lowerCAmelCase__ : int = ( jnp.linspace( config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCAmelCase__ : List[Any] = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype ) else: raise NotImplementedError( F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) lowerCAmelCase__ : str = 1.0 - betas lowerCAmelCase__ : Union[str, Any] = jnp.cumprod(lowercase_ ,axis=0 ) return cls( alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,) def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ : Any = state.alphas_cumprod lowerCAmelCase__ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase__ : Tuple = sqrt_alpha_prod.flatten() lowerCAmelCase__ : str = broadcast_to_shape_from_left(A_ , original_samples.shape ) lowerCAmelCase__ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.flatten() lowerCAmelCase__ : Optional[int] = broadcast_to_shape_from_left(A_ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ ) lowerCAmelCase__ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ ) lowerCAmelCase__ : Union[str, Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
106
1
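# The flax scheduling utilities above include betas_for_alpha_bar, which
# discretizes the Glide "squaredcos_cap_v2" cosine noise schedule. A numpy
# sketch of the same construction (assuming the usual max_beta clip of 0.999,
# as in the code above):
import math
import numpy as np


def cosine_betas(num_steps, max_beta=0.999):
    alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return np.array(betas)


betas = cosine_betas(1000)  # small early betas, approaching max_beta late
alphas_cumprod = np.cumprod(1.0 - betas)  # matches the scheduler-state setup above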
'''simple docstring''' from dataclasses import dataclass from typing import Tuple import numpy as np import torch @dataclass class __SCREAMING_SNAKE_CASE : '''simple docstring''' lowerCamelCase_ :torch.Tensor # [batch_size x 3] lowerCamelCase_ :torch.Tensor # [batch_size x 3] lowerCamelCase_ :torch.Tensor # [batch_size x 3] lowerCamelCase_ :torch.Tensor # [batch_size x 3] lowerCamelCase_ :int lowerCamelCase_ :int lowerCamelCase_ :float lowerCamelCase_ :float lowerCamelCase_ :Tuple[int] def _UpperCamelCase ( self ): '''simple docstring''' assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0] assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3 assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2 def _UpperCamelCase ( self ): '''simple docstring''' return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) ) def _UpperCamelCase ( self ): '''simple docstring''' return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : List[Any] = torch.arange(self.height * self.width ) UpperCAmelCase_ : Optional[int] = torch.stack( [ pixel_indices % self.width, torch.div(UpperCamelCase_ , self.width , rounding_mode='trunc' ), ] , axis=1 , ) return coords @property def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ , *UpperCAmelCase_ : int = self.shape UpperCAmelCase_ : int = int(np.prod(UpperCamelCase_ ) ) UpperCAmelCase_ : List[str] = self.get_image_coords() UpperCAmelCase_ : Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] ) UpperCAmelCase_ : Tuple = self.get_camera_rays(UpperCamelCase_ ) UpperCAmelCase_ : Union[str, Any] = rays.view(UpperCamelCase_ , inner_batch_size * self.height * self.width , 2 , 3 ) return rays def _UpperCamelCase ( self , snake_case_ ): '''simple docstring''' UpperCAmelCase_ , *UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = coords.shape assert n_coords == 2 assert batch_size == self.origin.shape[0] UpperCAmelCase_ : Tuple = coords.view(UpperCamelCase_ , -1 , 2 ) UpperCAmelCase_ : Tuple = self.resolution() UpperCAmelCase_ : str = self.fov() UpperCAmelCase_ : List[Any] = (flat.float() / (res - 1)) * 2 - 1 UpperCAmelCase_ : Any = fracs * torch.tan(fov / 2 ) UpperCAmelCase_ : str = fracs.view(UpperCamelCase_ , -1 , 2 ) UpperCAmelCase_ : Dict = ( self.z.view(UpperCamelCase_ , 1 , 3 ) + self.x.view(UpperCamelCase_ , 1 , 3 ) * fracs[:, :, :1] + self.y.view(UpperCamelCase_ , 1 , 3 ) * fracs[:, :, 1:] ) UpperCAmelCase_ : Tuple = directions / directions.norm(dim=-1 , keepdim=UpperCamelCase_ ) UpperCAmelCase_ : Dict = torch.stack( [ torch.broadcast_to(self.origin.view(UpperCamelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ), directions, ] , dim=2 , ) return rays.view(UpperCamelCase_ , *UpperCamelCase_ , 2 , 3 ) def _UpperCamelCase ( self , snake_case_ , snake_case_ ): '''simple docstring''' assert width * self.height == height * self.width, "The aspect ratio should not change." 
return DifferentiableProjectiveCamera( origin=self.origin , x=self.x , y=self.y , z=self.z , width=UpperCamelCase_ , height=UpperCamelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , ) def _lowerCamelCase ( lowerCamelCase_ : List[Any] ): """simple docstring""" UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : str = [] UpperCAmelCase_ : Any = [] for theta in np.linspace(0 , 2 * np.pi , num=20 ): UpperCAmelCase_ : Dict = np.array([np.sin(lowerCamelCase_ ), np.cos(lowerCamelCase_ ), -0.5] ) z /= np.sqrt(np.sum(z**2 ) ) UpperCAmelCase_ : int = -z * 4 UpperCAmelCase_ : Tuple = np.array([np.cos(lowerCamelCase_ ), -np.sin(lowerCamelCase_ ), 0.0] ) UpperCAmelCase_ : List[Any] = np.cross(lowerCamelCase_ , lowerCamelCase_ ) origins.append(lowerCamelCase_ ) xs.append(lowerCamelCase_ ) ys.append(lowerCamelCase_ ) zs.append(lowerCamelCase_ ) return DifferentiableProjectiveCamera( origin=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCamelCase_ , axis=0 ) ).float() , width=lowerCamelCase_ , height=lowerCamelCase_ , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCamelCase_ )) , )
355
'''simple docstring''' from manim import * class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : Dict = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase_ : Any = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase_ : List[str] = Rectangle(height=0.25 , width=0.25 ) UpperCAmelCase_ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Optional[int] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : str = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Any = Text('CPU' , font_size=2_4 ) UpperCAmelCase_ : Tuple = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [mem.copy() for i in range(4 )] UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : List[str] = Text('GPU' , font_size=2_4 ) UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) gpu.move_to([-1, -1, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : str = Text('Model' , font_size=2_4 ) UpperCAmelCase_ : Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) model.move_to([3, -1.0, 0] ) self.add(snake_case_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : Optional[Any] = [] for i, rect in enumerate(snake_case_ ): UpperCAmelCase_ : str = fill.copy().set_fill(snake_case_ , opacity=0.8 ) target.move_to(snake_case_ ) model_arr.append(snake_case_ ) UpperCAmelCase_ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(snake_case_ ) self.add(*snake_case_ , *snake_case_ ) UpperCAmelCase_ : List[Any] = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : List[str] = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Optional[Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 ) UpperCAmelCase_ : Tuple = Text('Disk' , font_size=2_4 ) UpperCAmelCase_ : Union[str, Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ ) disk.move_to([-4, -1.25, 0] ) self.add(snake_case_ , snake_case_ ) UpperCAmelCase_ : List[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase_ : Any = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(snake_case_ , snake_case_ ) UpperCAmelCase_ : Dict = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(snake_case_ ) UpperCAmelCase_ : Optional[Any] = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ ) ) 
UpperCAmelCase_ : Tuple = Square(0.3 ) input.set_fill(snake_case_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , snake_case_ , buff=0.5 ) self.play(Write(snake_case_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=snake_case_ , buff=0.02 ) self.play(MoveToTarget(snake_case_ ) ) self.play(FadeOut(snake_case_ ) ) UpperCAmelCase_ : Any = Arrow(start=snake_case_ , end=snake_case_ , color=snake_case_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , snake_case_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) UpperCAmelCase_ : List[str] = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) ) UpperCAmelCase_ : List[Any] = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(snake_case_ ) , Circumscribe(model_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_cpu_arr[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) UpperCAmelCase_ : Union[str, Any] = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , snake_case_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) UpperCAmelCase_ : Tuple = AnimationGroup( FadeOut(snake_case_ , run_time=0.5 ) , MoveToTarget(snake_case_ , run_time=0.5 ) , FadeIn(snake_case_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(snake_case_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: UpperCAmelCase_ : Any = 0.7 self.play( Circumscribe(model_arr[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i] , **snake_case_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , Circumscribe(model_arr[i + 1] , color=snake_case_ , **snake_case_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(cpu_left_col_base[-1] , color=snake_case_ , **snake_case_ ) , Circumscribe(gpu_rect[0] , color=snake_case_ , **snake_case_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) UpperCAmelCase_ : Any = a_c UpperCAmelCase_ : int = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(snake_case_ ) , FadeOut(snake_case_ , run_time=0.5 ) , ) UpperCAmelCase_ : Optional[Any] = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case_ , run_time=3 ) , MoveToTarget(snake_case_ ) ) self.wait()
274
0
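# The DifferentiableProjectiveCamera above builds per-pixel ray directions
# from x/y/z camera axes and a field of view. A compact numpy sketch of the
# same pinhole geometry for a single camera; the axis vectors and image size
# below are illustrative assumptions:
import numpy as np


def camera_rays(origin, x_axis, y_axis, z_axis, width, height, x_fov, y_fov):
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))
    fracs = np.stack([xs, ys], axis=-1).reshape(-1, 2).astype(np.float64)
    fracs = fracs / (np.array([width, height]) - 1) * 2 - 1  # map pixels to [-1, 1]
    fracs = fracs * np.tan(np.array([x_fov, y_fov]) / 2)  # scale by half the FOV
    dirs = z_axis + fracs[:, :1] * x_axis + fracs[:, 1:] * y_axis
    dirs /= np.linalg.norm(dirs, axis=-1, keepdims=True)  # unit-length rays
    return np.broadcast_to(origin, dirs.shape), dirs


origins, directions = camera_rays(
    origin=np.zeros(3), x_axis=np.array([1.0, 0, 0]), y_axis=np.array([0, 1.0, 0]),
    z_axis=np.array([0, 0, 1.0]), width=4, height=4, x_fov=0.7, y_fov=0.7,
)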
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        # Map deprecated negative flags (e.g. no_cuda=True) onto their
        # positive counterparts (cuda=False) before the dataclass init runs.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
124
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
118
0
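# The benchmark-arguments class above maps deprecated negative flags
# ("no_cuda") onto their positive counterparts by stripping the "no_" prefix
# and negating the value. The same pattern in isolation (flag names here are
# just examples):
def resolve_deprecated(kwargs, deprecated=("no_cuda", "no_tpu", "no_speed")):
    for arg in deprecated:
        if arg in kwargs:
            kwargs[arg[3:]] = not kwargs.pop(arg)  # no_cuda=True -> cuda=False
    return kwargs


assert resolve_deprecated({"no_cuda": True, "batch": 8}) == {"cuda": False, "batch": 8}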
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then collects garbage and empties the device cache.
    Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` relates to CUDA out-of-memory, cuDNN support, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that retries `function` with half the batch size each time an out-of-memory error is raised.
    `function` must take `batch_size` as its first argument; the decorator injects it.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
322
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save per-example sequence lengths for the train/val splits to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
322
1
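# Typical use of the find_executable_batch_size decorator above: start large
# and let the wrapper halve batch_size on every OOM until the workload fits.
# (The training_function body and its argument are illustrative assumptions.)
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def training_function(batch_size, model_name):
    # batch_size is injected by the decorator; don't pass it yourself.
    print(f"Trying batch_size={batch_size} for {model_name}")
    ...


training_function("bert-base-uncased")  # retries at 128, 64, 32, ... on OOM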
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
93
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer _A = """bart""" _A = True @st.cache(allow_output_mutation=__UpperCAmelCase ) def lowercase_ ( ) -> Optional[Any]: if LOAD_DENSE_INDEX: lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" ) lowerCAmelCase__ : Tuple = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" ) lowerCAmelCase__ : List[str] = qar_model.eval() else: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = (None, None) if MODEL_TYPE == "bart": lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" ) lowerCAmelCase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" ) lowerCAmelCase__ : List[str] = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" ) sas_model.load_state_dict(save_dict["""model"""] ) lowerCAmelCase__ : Dict = sas_model.eval() else: lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = make_qa_sas_model( model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=__UpperCAmelCase ) def lowercase_ ( ) -> Union[str, Any]: if LOAD_DENSE_INDEX: lowerCAmelCase__ : Union[str, Any] = faiss.StandardGpuResources() lowerCAmelCase__ : List[Any] = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""] lowerCAmelCase__ : Dict = np.memmap( """wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 128) , ) lowerCAmelCase__ : str = faiss.IndexFlatIP(128 ) lowerCAmelCase__ : Optional[int] = faiss.index_cpu_to_gpu(__UpperCAmelCase , 1 , __UpperCAmelCase ) wikiaab_gpu_index_flat.add(__UpperCAmelCase ) # TODO fix for larger GPU else: lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (None, None) lowerCAmelCase__ : Optional[int] = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=__UpperCAmelCase ) def lowercase_ ( ) -> List[str]: lowerCAmelCase__ : List[Any] = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" ) lowerCAmelCase__ : Tuple = elia["""train_eli5"""] lowerCAmelCase__ : Dict = np.memmap( """eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 128) ) lowerCAmelCase__ : Dict = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(__UpperCAmelCase ) return (elia_train, eli5_train_q_index) _A , _A , _A = load_indexes() _A , _A , _A , _A = load_models() _A , _A = load_train_data() def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=10 ) -> Optional[Any]: lowerCAmelCase__ : str = embed_questions_for_retrieval([question] , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = eli5_train_q_index.search(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = [elia_train[int(__UpperCAmelCase )] for i in I[0]] return nn_examples def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="wiki40b" , __UpperCAmelCase="dense" , __UpperCAmelCase=10 ) -> 
List[str]: if source == "none": lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), []) else: if method == "dense": lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = query_qa_dense_index( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ , lowerCAmelCase__ : int = query_es_index( __UpperCAmelCase , __UpperCAmelCase , index_name="""english_wiki40b_snippets_100w""" , n_results=__UpperCAmelCase , ) lowerCAmelCase__ : Optional[int] = [ (res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst ] lowerCAmelCase__ : Optional[Any] = """question: {} context: {}""".format(__UpperCAmelCase , __UpperCAmelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda __UpperCAmelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda __UpperCAmelCase : None), } ) def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=64 , __UpperCAmelCase=256 , __UpperCAmelCase=False , __UpperCAmelCase=2 , __UpperCAmelCase=0.95 , __UpperCAmelCase=0.8 ) -> Optional[int]: with torch.no_grad(): lowerCAmelCase__ : List[Any] = qa_sas_generate( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , num_answers=1 , num_beams=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase , do_sample=__UpperCAmelCase , temp=__UpperCAmelCase , top_p=__UpperCAmelCase , top_k=__UpperCAmelCase , max_input_length=1024 , device="""cuda:0""" , )[0] return (answer, support_list) st.title("""Long Form Question Answering with ELI5""") # Start sidebar _A = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>""" _A = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class=\"img-container\"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia _A = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) _A = [ """Answer the question""", """View the retrieved document only""", """View the most similar ELI5 question and answer""", """Show me everything, please!""", ] _A = st.sidebar.checkbox("""Demo options""") if demo_options: _A = st.sidebar.selectbox( """""", action_list, index=3, ) _A = action_list.index(action_st) _A = st.sidebar.selectbox( """""", ["""Show full text of passages""", """Show passage section titles"""], index=0, ) _A = show_type == """Show full text of passages""" else: _A = 3 _A = True _A = st.sidebar.checkbox("""Retrieval options""") if retrieval_options: _A = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. 
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) _A = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""]) _A = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""]) else: _A = """wiki40b""" _A = """dense""" _A = """beam""" _A = 2 _A = 6_4 _A = 2_5_6 _A = None _A = None _A = st.sidebar.checkbox("""Generation options""") if generate_options: _A = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. """ st.sidebar.markdown(generate_info) _A = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""]) _A = st.sidebar.slider( """Minimum generation length""", min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None ) _A = st.sidebar.slider( """Maximum generation length""", min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None ) if sampled == "beam": _A = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: _A = st.sidebar.slider( """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) _A = st.sidebar.slider( """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) _A = None # start main text _A = [ """<MY QUESTION>""", """How do people make chocolate?""", """Why do we get a fever when we are sick?""", """How can different animals perceive different colors?""", """What is natural language processing?""", """What's the best way to treat a sunburn?""", """What exactly are vitamins ?""", """How does nuclear energy provide electricity?""", """What's the difference between viruses and bacteria?""", """Why are flutes classified as woodwinds when most of them are made out of metal ?""", """Why do people like drinking coffee even though it tastes so bad?""", """What happens when wine ages? How does it make the wine taste better?""", """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""", """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""", """How does New Zealand have so many large bird predators?""", ] _A = st.selectbox( """What would you like to ask? 
---- select <MY QUESTION> to enter a new query""", questions_list, index=1, ) if question_s == "<MY QUESTION>": _A = st.text_input("""Enter your question here:""", """""") else: _A = question_s if st.button("""Show me!"""): if action in [0, 1, 3]: if index_type == "mixed": _A , _A = make_support(question, source=wiki_source, method="""dense""", n_results=1_0) _A , _A = make_support(question, source=wiki_source, method="""sparse""", n_results=1_0) _A = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] _A = support_list[:1_0] _A = """<P> """ + """ <P> """.join([res[-1] for res in support_list]) else: _A , _A = make_support(question, source=wiki_source, method=index_type, n_results=1_0) if action in [0, 3]: _A , _A = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == """sampled"""), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("""### The model generated answer is:""") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""") for i, res in enumerate(support_list): _A = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_""")) _A = res[1].strip() if sec_titles == "": _A = """[{}]({})""".format(res[0], wiki_url) else: _A = sec_titles.split(""" & """) _A = """ & """.join( ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list] ) st.markdown( """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True ) if action in [2, 3]: _A = find_nearest_training(question) _A = nn_train_list[0] st.markdown( """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""]) ) _A = [ """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""])) for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""])) if i == 0 or sc > 2 ] st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st))) _A = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
242
0
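# The ELI5 demo above retrieves passages with max-inner-product search over a
# faiss index. A minimal sketch of that retrieval step with random embeddings
# (dimension 128 matches the memmapped representations in the app; the data
# itself is synthetic):
import faiss
import numpy as np

d = 128
passage_reps = np.random.rand(1000, d).astype("float32")
index = faiss.IndexFlatIP(d)  # exact inner-product index
index.add(passage_reps)

question_rep = np.random.rand(1, d).astype("float32")
scores, ids = index.search(question_rep, 10)  # top-10 passages
print(ids[0])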
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
269
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
269
1
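# The has_loop property above is O(n^2) because `node in visited` scans a
# Python list on every step. Floyd's tortoise-and-hare detects a cycle in
# O(n) time and O(1) space and works with the same Node class:
def has_loop_floyd(head) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:
            return True
    return False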
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" a_ =StableDiffusionInstructPixaPixPipeline a_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} a_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a_ =IMAGE_TO_IMAGE_IMAGE_PARAMS a_ =IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowercase ( self : Tuple ) -> Tuple: torch.manual_seed(0 ) __lowerCamelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) __lowerCamelCase : Tuple = PNDMScheduler(skip_prk_steps=_snake_case ) torch.manual_seed(0 ) __lowerCamelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) __lowerCamelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) __lowerCamelCase : Optional[int] = CLIPTextModel(_snake_case ) __lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) __lowerCamelCase : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _lowercase ( self : str , _a : List[Any] , _a : Any=0 ) -> Any: __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case ) __lowerCamelCase : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCamelCase : List[Any] = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ) if str(_snake_case ).startswith('mps' ): __lowerCamelCase : Tuple = torch.manual_seed(_snake_case ) else: __lowerCamelCase : List[str] = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) __lowerCamelCase : Optional[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def _lowercase ( self : Any ) -> Any: __lowerCamelCase : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[Any] = 
self.get_dummy_components() __lowerCamelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_snake_case ) __lowerCamelCase : int = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) __lowerCamelCase : Dict = self.get_dummy_inputs(_snake_case ) __lowerCamelCase : Any = sd_pipe(**_snake_case ).images __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase : Union[str, Any] = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : str ) -> str: __lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Any = self.get_dummy_components() __lowerCamelCase : List[Any] = StableDiffusionInstructPixaPixPipeline(**_snake_case ) __lowerCamelCase : str = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) __lowerCamelCase : str = self.get_dummy_inputs(_snake_case ) __lowerCamelCase : Tuple = 'french fries' __lowerCamelCase : Any = sd_pipe(**_snake_case , negative_prompt=_snake_case ) __lowerCamelCase : Optional[Any] = output.images __lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase : str = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : Tuple ) -> int: __lowerCamelCase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : int = self.get_dummy_components() __lowerCamelCase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_snake_case ) __lowerCamelCase : Optional[Any] = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) __lowerCamelCase : str = self.get_dummy_inputs(_snake_case ) __lowerCamelCase : List[Any] = [inputs['prompt']] * 2 __lowerCamelCase : Dict = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 __lowerCamelCase : Dict = torch.from_numpy(_snake_case ).unsqueeze(0 ).to(_snake_case ) __lowerCamelCase : Union[str, Any] = image / 2 + 0.5 __lowerCamelCase : str = image.permute(0 , 3 , 1 , 2 ) __lowerCamelCase : int = image.repeat(2 , 1 , 1 , 1 ) __lowerCamelCase : Tuple = sd_pipe(**_snake_case ).images __lowerCamelCase : str = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __lowerCamelCase : Union[str, Any] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : str ) -> Union[str, Any]: __lowerCamelCase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowerCamelCase : Optional[Any] = self.get_dummy_components() __lowerCamelCase : Any = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' ) __lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline(**_snake_case ) __lowerCamelCase : int = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) __lowerCamelCase : Any = self.get_dummy_inputs(_snake_case ) __lowerCamelCase : Dict = sd_pipe(**_snake_case ).images __lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] __lowerCamelCase : int = [round(_snake_case , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(_snake_case ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) 
__lowerCamelCase : Optional[int] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def _lowercase ( self : Any ) -> Optional[int]: __lowerCamelCase : int = self.get_dummy_components() __lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline(**_snake_case ) __lowerCamelCase : List[Any] = VaeImageProcessor(do_resize=_snake_case , do_normalize=_snake_case ) __lowerCamelCase : Any = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs_by_type(_snake_case , input_image_type='pt' ) )[0] __lowerCamelCase : List[str] = components['vae'] __lowerCamelCase : Tuple = self.get_dummy_inputs_by_type(_snake_case , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): __lowerCamelCase : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode() __lowerCamelCase : Dict = pipe(**_snake_case )[0] __lowerCamelCase : Optional[int] = np.abs(out - out_latents_inputs ).max() self.assertLess(_snake_case , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ) -> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Optional[Any] , _a : Optional[int]=0 ) -> Dict: __lowerCamelCase : List[Any] = torch.manual_seed(_snake_case ) __lowerCamelCase : Optional[Any] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) __lowerCamelCase : List[str] = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def _lowercase ( self : Tuple ) -> Dict: __lowerCamelCase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() __lowerCamelCase : Any = self.get_inputs() __lowerCamelCase : Optional[Any] = pipe(**_snake_case ).images __lowerCamelCase : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __lowerCamelCase : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : str ) -> List[str]: __lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_snake_case ) __lowerCamelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() __lowerCamelCase : Dict = self.get_inputs() __lowerCamelCase : Union[str, Any] = pipe(**_snake_case ).images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __lowerCamelCase : List[str] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : 
Tuple ) -> int: __lowerCamelCase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_snake_case ) __lowerCamelCase : int = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() __lowerCamelCase : Optional[int] = self.get_inputs() __lowerCamelCase : Dict = pipe(**_snake_case ).images __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __lowerCamelCase : Optional[int] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : List[Any] ) -> Optional[Any]: __lowerCamelCase : Optional[int] = 0 def callback_fn(_a : int , _a : str , _a : Optional[int] ) -> None: __lowerCamelCase : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __lowerCamelCase : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __lowerCamelCase : int = latents[0, -3:, -3:, -1] __lowerCamelCase : Dict = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: __lowerCamelCase : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __lowerCamelCase : Dict = latents[0, -3:, -3:, -1] __lowerCamelCase : Optional[int] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 __lowerCamelCase : List[str] = False __lowerCamelCase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_snake_case , torch_dtype=torch.floataa ) __lowerCamelCase : int = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() __lowerCamelCase : Union[str, Any] = self.get_inputs() pipe(**_snake_case , callback=_snake_case , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _lowercase ( self : List[Any] ) -> List[str]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __lowerCamelCase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=_snake_case , torch_dtype=torch.floataa ) __lowerCamelCase : int = pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __lowerCamelCase : Tuple = self.get_inputs() __lowerCamelCase : List[str] = pipe(**_snake_case ) __lowerCamelCase : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def _lowercase ( self : Union[str, Any] ) -> Tuple: __lowerCamelCase : Tuple = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 __lowerCamelCase : List[Any] = inputs['image'].resize((504, 504) ) __lowerCamelCase : Dict = 'timbrooks/instruct-pix2pix' __lowerCamelCase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( _snake_case , safety_checker=_snake_case , ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) pipe.enable_attention_slicing() __lowerCamelCase : Tuple = pipe(**_snake_case ) __lowerCamelCase : List[Any] = output.images[0] __lowerCamelCase : 
str = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) __lowerCamelCase : Any = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
208
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of all left- and right-truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick pre-filter: for numbers longer than 3 digits, the leading and
    trailing 3-digit chunks must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes (Project Euler problem 37)."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
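# Added sanity check (illustrative): 3797 is the classic truncatable prime --
# 3797, 797, 97, 7 and 379, 37, 3 are all prime.
assert all(is_prime(i) for i in list_truncated_nums(3797))
assert 3797 in compute_truncated_primes(11)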
82
0
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
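# Added sketch (hypothetical command, not part of the original file): how a
# concrete subclass plugs into the CLI. `parser` is assumed to be the action
# returned by ArgumentParser.add_subparsers(), which is what gets passed in.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        echo_parser = parser.add_parser("echo", help="print the given text")
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)


if __name__ == "__main__":
    cli = ArgumentParser("demo")
    commands = cli.add_subparsers()
    EchoCommand.register_subcommand(commands)
    args = cli.parse_args(["echo", "hello"])
    args.func(args).run()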
353
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given exactly two of voltage, current and power (the third passed as 0),
    computes the missing quantity using P = V * I."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
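# Added worked examples (illustrative):
print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)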
160
0
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ): if attention_mask is None: __a = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class _A : UpperCamelCase__ : List[str] = OPTConfig UpperCamelCase__ : Any = {} UpperCamelCase__ : Optional[int] = '''gelu''' def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : int=20 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : str=16 , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id __a = embed_dim __a = word_embed_proj_dim __a = False def _lowerCamelCase ( self : str): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __a = tf.concat([input_ids, eos_tensor] , axis=1) __a = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=__SCREAMING_SNAKE_CASE , **self.config_updates , ) __a = prepare_opt_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return config, inputs_dict def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = TFOPTModel(config=__SCREAMING_SNAKE_CASE) __a = inputs_dict['''input_ids'''] __a = input_ids[:1, :] __a = inputs_dict['''attention_mask'''][:1, :] __a = 1 # first forward pass __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE) __a , __a = outputs.to_tuple() # create hypothetical next 
token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size) __a = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and __a = tf.concat([input_ids, next_tokens] , axis=-1) __a = tf.concat([attention_mask, next_attn_mask] , axis=-1) __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0] __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice __a = int(ids_tensor((1,) , output_from_past.shape[-1])) __a = output_from_no_past[:, -3:, random_slice_idx] __a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-3) @require_tf class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Optional[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () UpperCamelCase__ : int = (TFOPTForCausalLM,) if is_tf_available() else () UpperCamelCase__ : str = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) UpperCamelCase__ : Optional[Any] = False UpperCamelCase__ : Optional[int] = False UpperCamelCase__ : Union[str, Any] = False UpperCamelCase__ : int = 10 def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = TFOPTModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a , __a = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(__SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int): if hasattr(__SCREAMING_SNAKE_CASE , '''weight'''): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(__SCREAMING_SNAKE_CASE , '''weight'''): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings __a = model_class(config=__SCREAMING_SNAKE_CASE) __a = _get_word_embedding_weight(__SCREAMING_SNAKE_CASE , model.get_input_embeddings()) __a = _get_word_embedding_weight(__SCREAMING_SNAKE_CASE , model.get_output_embeddings()) # reshape the embeddings model.resize_token_embeddings(__SCREAMING_SNAKE_CASE) __a = _get_word_embedding_weight(__SCREAMING_SNAKE_CASE , model.get_input_embeddings()) __a = _get_word_embedding_weight(__SCREAMING_SNAKE_CASE , model.get_output_embeddings()) # check that the resized embeddings size matches the desired size. 
__a = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , __SCREAMING_SNAKE_CASE) # check that weights remain the same after resizing __a = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: __a = False self.assertTrue(__SCREAMING_SNAKE_CASE) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , __SCREAMING_SNAKE_CASE) __a = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(pa - pa)) > 0: __a = False self.assertTrue(__SCREAMING_SNAKE_CASE) def __snake_case ( _UpperCAmelCase ): return tf.constant(_UpperCAmelCase , dtype=tf.intaa ) @require_tf class _A ( unittest.TestCase ): UpperCamelCase__ : Optional[int] = 99 def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = tf.ones((4, 1) , dtype=tf.intaa) * 2 __a = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3) + 3, eos_column_vector] , axis=1) __a = input_ids.shape[0] __a = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class _A ( unittest.TestCase ): @slow def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = TFOPTModel.from_pretrained('''facebook/opt-350m''') __a = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]]) __a = tf.not_equal(__SCREAMING_SNAKE_CASE , model.config.pad_token_id) with tf.GradientTape(): __a = model(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE).last_hidden_state __a = (1, 11, 512) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE) __a = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]]) self.assertTrue(np.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=4E-3)) __a = tf.function(__SCREAMING_SNAKE_CASE , jit_compile=__SCREAMING_SNAKE_CASE) __a = xla_generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)[0] self.assertTrue(np.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=4E-2)) @require_tf @slow class _A ( unittest.TestCase ): def _lowerCamelCase ( self : int): '''simple docstring''' super().setUp() __a = '''facebook/opt-350m''' def _lowerCamelCase ( self : Tuple): '''simple docstring''' __a = TFOPTForCausalLM.from_pretrained(self.path_model) __a = GPTaTokenizer.from_pretrained(self.path_model) __a = [ '''Today is a beautiful day and I want to''', '''In the city of''', '''Paris is the capital of France and''', '''Computers and mobile phones have taken''', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False __a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE) __a = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1) __a = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 
1.34_77], ]) self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-4)) __a = tf.function(__SCREAMING_SNAKE_CASE , jit_compile=__SCREAMING_SNAKE_CASE) __a = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask)[0] , axis=-1) self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-4)) @require_tf @slow class _A ( unittest.TestCase ): @property def _lowerCamelCase ( self : List[str]): '''simple docstring''' return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = '''facebook/opt-125m''' __a = [ '''Today is a beautiful day and I want to''', '''In the city of New York, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] __a = [] __a = GPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE) __a = TFOPTForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE) for prompt in self.prompts: __a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''').input_ids __a = model.generate(__SCREAMING_SNAKE_CASE , max_length=10) __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE) predicted_outputs += generated_string self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' __a = '''facebook/opt-350m''' __a = GPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE) __a = TFOPTForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE) __a = '''left''' # use different length sentences to test batching __a = [ '''Hello, my dog is a little''', '''Today, I''', ] __a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''' , padding=__SCREAMING_SNAKE_CASE) __a = inputs['''input_ids'''] __a = model.generate(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=inputs['''attention_mask''']) __a = tokenizer(sentences[0] , return_tensors='''tf''').input_ids __a = model.generate(input_ids=__SCREAMING_SNAKE_CASE) __a = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['''attention_mask'''][-1] , tf.intaa)) __a = tokenizer(sentences[1] , return_tensors='''tf''').input_ids __a = model.generate(input_ids=__SCREAMING_SNAKE_CASE , max_length=model.config.max_length - num_paddings) __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE) __a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE) __a = tokenizer.decode(output_padded[0] , skip_special_tokens=__SCREAMING_SNAKE_CASE) __a = [ '''Hello, my dog is a little bit of a dork.\nI\'m a little bit''', '''Today, I was in the middle of a conversation with a friend about the''', ] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) self.assertListEqual(__SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence]) def _lowerCamelCase ( self : int): '''simple docstring''' __a = '''facebook/opt-350m''' __a = [ '''Today is a beautiful day and I want to''', '''In the city of San Francisco, the city''', '''Paris is the capital of France and the capital''', '''Computers and mobile phones have taken over the''', ] __a = [] __a = GPTaTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE) __a = TFOPTForCausalLM.from_pretrained(__SCREAMING_SNAKE_CASE) for prompt in self.prompts: __a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='''tf''').input_ids __a = 
model.generate(__SCREAMING_SNAKE_CASE , max_length=10) __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE) predicted_outputs += generated_string self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
49
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        use_cache=True,
        add_router_probs=False,
        is_encoder_decoder=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
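# Added note (illustrative): with the defaults above, a sparse MoE layer is
# placed every num_layers // num_sparse_encoder_layers = 12 // 3 = 4 layers.
# cfg = SwitchTransformersConfig()
# print(cfg.encoder_sparse_step, cfg.decoder_sparse_step)  # 4 4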
49
1
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) UpperCAmelCase = logging.getLogger() def lowerCamelCase () -> int: lowercase :str = argparse.ArgumentParser() parser.add_argument('''-f''') lowercase :Optional[Any] = parser.parse_args() return args.f class __magic_name__ ( __UpperCAmelCase ): def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase :Dict = logging.StreamHandler(sys.stdout ) logger.addHandler(snake_case__ ) def __snake_case ( self : int , snake_case__ : List[str] ): '''simple docstring''' lowercase :str = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(snake_case__ , '''argv''' , snake_case__ ): lowercase :Optional[Any] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(snake_case__ , 0.6_66 ) @slow @require_torch_non_multi_gpu def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :Optional[Any] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(snake_case__ ) lowercase :Any = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(snake_case__ ) lowercase :Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(snake_case__ )
366
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase = logging.get_logger(__name__) # TODO: upload to AWS UpperCAmelCase = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : List[Any] = "retribert" def __init__( self : Dict , snake_case__ : Union[str, Any]=3_0_5_2_2 , snake_case__ : Union[str, Any]=7_6_8 , snake_case__ : Optional[Any]=8 , snake_case__ : int=1_2 , snake_case__ : Optional[int]=3_0_7_2 , snake_case__ : Any="gelu" , snake_case__ : str=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Union[str, Any]=2 , snake_case__ : Dict=0.02 , snake_case__ : Tuple=1e-1_2 , snake_case__ : Any=True , snake_case__ : Tuple=1_2_8 , snake_case__ : Optional[int]=0 , **snake_case__ : List[str] , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) lowercase :Any = vocab_size lowercase :Optional[Any] = hidden_size lowercase :str = num_hidden_layers lowercase :List[str] = num_attention_heads lowercase :Union[str, Any] = hidden_act lowercase :Any = intermediate_size lowercase :str = hidden_dropout_prob lowercase :str = attention_probs_dropout_prob lowercase :Optional[Any] = max_position_embeddings lowercase :Union[str, Any] = type_vocab_size lowercase :Any = initializer_range lowercase :int = layer_norm_eps lowercase :List[str] = share_encoders lowercase :Union[str, Any] = projection_dim
172
0
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = ['''image_processor''', '''tokenizer'''] SCREAMING_SNAKE_CASE__ : Dict = '''LayoutLMv2ImageProcessor''' SCREAMING_SNAKE_CASE__ : Dict = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''') def __init__( self :Union[str, Any] , lowerCAmelCase__ :int=None , lowerCAmelCase__ :List[str]=None , **lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]: if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop('''feature_extractor''' ) __SCREAMING_SNAKE_CASE : Optional[int] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __call__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowerCAmelCase__ :Union[List[List[int]], List[List[List[int]]]] = None , lowerCAmelCase__ :Optional[Union[List[int], List[List[int]]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ :List[Any] , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor __SCREAMING_SNAKE_CASE : str = self.image_processor(images=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE : Any = [text] # add batch dimension (as the image processor always adds a batch dimension) __SCREAMING_SNAKE_CASE : List[str] = features['''words'''] __SCREAMING_SNAKE_CASE : str = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , 
boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , ) # add pixel values __SCREAMING_SNAKE_CASE : Optional[Any] = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: __SCREAMING_SNAKE_CASE : List[str] = self.get_overflowing_images(lowerCAmelCase__ , encoded_inputs['''overflow_to_sample_mapping'''] ) __SCREAMING_SNAKE_CASE : List[str] = images return encoded_inputs def __magic_name__( self :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] ) -> Optional[int]: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image __SCREAMING_SNAKE_CASE : Dict = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(lowerCAmelCase__ )} and {len(lowerCAmelCase__ )}''' ) return images_with_overflow def __magic_name__( self :Any , *lowerCAmelCase__ :Optional[int] , **lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]: return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] , *lowerCAmelCase__ :Tuple , **lowerCAmelCase__ :Union[str, Any] ) -> str: return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __magic_name__( self :Union[str, Any] ) -> List[Any]: return ["input_ids", "bbox", "attention_mask", "image"] @property def __magic_name__( self :str ) -> Dict: warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase__ , ) return self.image_processor_class @property def __magic_name__( self :Tuple ) -> List[Any]: warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase__ , ) return self.image_processor
9
import d4rl  # noqa
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline


config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
192
0
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the same digits as the
    number itself, e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
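# Added examples (illustrative): 5^2 = 25, 76^2 = 5776, 9376^2 = 87909376.
assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert is_automorphic_number(9376)
assert not is_automorphic_number(7)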
191
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
191
1
def mf_knapsack(i, wt, val, j):
    """Memory-function (top-down, memoised) 0-1 knapsack; `f` is a global DP
    table pre-filled with -1 for unsolved subproblems."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0-1 knapsack; returns the optimal value and the full DP table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w, wt, val):
    """Solves the 0-1 knapsack and also reconstructs one optimal subset of
    item indices (1-based)."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Walks the DP table backwards: item i is in an optimal subset at weight j
    exactly when dp[i][j] differs from dp[i - 1][j]."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
14
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
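# Added usage sketch (illustrative; assumes keras-nlp and tensorflow-text are
# installed and the relative import above resolves inside transformers):
#
#   tok = TFGPT2Tokenizer.from_pretrained("gpt2")
#   out = tok(tf.constant(["hello world"]))
#   print(out["input_ids"], out["attention_mask"])
#
# Because it is a Keras layer, it can be embedded in a tf.keras.Model and run
# end-to-end inside the TensorFlow graph, unlike the Python-side GPT2Tokenizer.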
318
0
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor __UpperCAmelCase : Optional[Any] = random.Random() def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any: if rng is None: __snake_case: str = global_rng __snake_case: Dict = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch @require_torchaudio class __snake_case ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] , A : Any , A : Dict=7 , A : Optional[Any]=400 , A : Optional[Any]=2_000 , A : Dict=24 , A : Dict=24 , A : int=0.0 , A : List[str]=16_000 , A : Optional[Any]=True , A : Union[str, Any]=True , ): __snake_case: List[Any] = parent __snake_case: int = batch_size __snake_case: Dict = min_seq_length __snake_case: int = max_seq_length __snake_case: int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __snake_case: Optional[int] = feature_size __snake_case: Optional[Any] = num_mel_bins __snake_case: Dict = padding_value __snake_case: Union[str, Any] = sampling_rate __snake_case: List[str] = return_attention_mask __snake_case: List[str] = do_normalize def UpperCAmelCase__ ( self : List[Any] ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Optional[int] , A : Union[str, Any]=False , A : List[Any]=False ): def _flatten(A : int ): return list(itertools.chain(*_A ) ) if equal_length: __snake_case: Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __snake_case: Tuple = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __snake_case: Union[str, Any] = [np.asarray(_A ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __snake_case ( snake_case__ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = SpeechaTextFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : Tuple ): __snake_case: Optional[Any] = SpeechaTextFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : Dict , A : Union[str, Any] ): self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) ) def UpperCAmelCase__ ( self : List[str] ): __snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __snake_case: Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: Union[str, Any] = [np.asarray(_A ) for speech_input in speech_inputs] # Test feature size __snake_case: List[str] = feature_extractor(_A , padding=_A , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not 
batched input __snake_case: Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features __snake_case: Any = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) # Test batched __snake_case: List[Any] = feature_extractor(_A , return_tensors="""np""" ).input_features __snake_case: int = feature_extractor(_A , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. __snake_case: List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)] __snake_case: List[str] = np.asarray(_A ) __snake_case: List[Any] = feature_extractor(_A , return_tensors="""np""" ).input_features __snake_case: Union[str, Any] = feature_extractor(_A , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(_A , _A ): self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Any ): __snake_case: int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: List[Any] = ["""longest""", """max_length""", """do_not_pad"""] __snake_case: str = [None, 16, None] for max_length, padding in zip(_A , _A ): __snake_case: Optional[Any] = feature_extractor( _A , padding=_A , max_length=_A , return_attention_mask=_A ) __snake_case: Dict = inputs.input_features __snake_case: Tuple = inputs.attention_mask __snake_case: List[str] = [np.sum(_A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__ ( self : Optional[Any] ): __snake_case: int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: int = ["""longest""", """max_length""", """do_not_pad"""] __snake_case: Optional[Any] = [None, 16, None] for max_length, padding in zip(_A , _A ): __snake_case: str = feature_extractor( _A , max_length=_A , padding=_A , return_tensors="""np""" , return_attention_mask=_A ) __snake_case: Optional[Any] = inputs.input_features __snake_case: Optional[Any] = inputs.attention_mask __snake_case: Union[str, Any] = [np.sum(_A ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def UpperCAmelCase__ ( self : List[str] ): __snake_case: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: int = feature_extractor( _A , padding="""max_length""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) __snake_case: Dict = inputs.input_features __snake_case: List[Any] = inputs.attention_mask __snake_case: Union[str, Any] = np.sum(attention_mask == 1 , axis=1 ) 
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def UpperCAmelCase__ ( self : List[str] ): __snake_case: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: Dict = feature_extractor( _A , padding="""longest""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) __snake_case: Optional[int] = inputs.input_features __snake_case: int = inputs.attention_mask __snake_case: str = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) __snake_case: Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] __snake_case: List[Any] = feature_extractor( _A , padding="""longest""" , max_length=16 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) __snake_case: Any = inputs.input_features __snake_case: List[Any] = inputs.attention_mask __snake_case: Any = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def UpperCAmelCase__ ( self : Dict ): import torch __snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa ) __snake_case: str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __snake_case: str = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __snake_case: Any = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , A : Tuple ): from datasets import load_dataset __snake_case: List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __snake_case: List[str] = ds.sort("""id""" ).select(range(_A ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : List[str] ): __snake_case: str = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on __snake_case: Dict = self._load_datasamples(1 ) __snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __snake_case: List[Any] = feature_extractor(_A , return_tensors="""pt""" ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] 
, _A , atol=1E-4 ) )
364
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging __UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name class __snake_case ( __lowerCamelCase ): '''simple docstring''' def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ): super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __snake_case: Tuple = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def UpperCAmelCase__ ( self : str ): self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ): if isinstance(A , A ): __snake_case: int = 1 elif isinstance(A , A ): __snake_case: Optional[Any] = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings __snake_case: Tuple = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __snake_case: Any = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) __snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape __snake_case: Tuple = text_embeddings.repeat(1 , A , 1 ) __snake_case: Dict = 
text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case: List[str] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case: List[str] if negative_prompt is None: __snake_case: Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): __snake_case: List[str] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: __snake_case: str = negative_prompt __snake_case: Any = text_input_ids.shape[-1] __snake_case: Dict = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) __snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case: Optional[Any] = uncond_embeddings.shape[1] __snake_case: str = uncond_embeddings.repeat(A , A , 1 ) __snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __snake_case: Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case: Any = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) __snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: __snake_case: Dict = torch.randn( A , generator=A , device=self.device , dtype=A ) __snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) __snake_case: Optional[int] = latents_reference.to(self.device ) __snake_case: List[str] = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2 __snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2 __snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __snake_case: List[Any] = 0 if dx < 0 else dx __snake_case: Dict = 0 if dy < 0 else dy __snake_case: List[str] = max(-dx , 0 ) __snake_case: int = max(-dy , 0 ) # import pdb # pdb.set_trace() __snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case: str = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __snake_case: int = {} if accepts_eta: __snake_case: Optional[Any] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance __snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __snake_case: Dict = self.scheduler.scale_model_input(A , A ) # predict the noise residual __snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case: Any = noise_pred.chunk(2 ) __snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) __snake_case: Optional[int] = 1 / 0.1_8215 * latents __snake_case: List[Any] = self.vae.decode(A ).sample __snake_case: str = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) __snake_case , __snake_case: List[str] = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __snake_case: Optional[int] = None if output_type == "pil": __snake_case: Tuple = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
293
0
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate __SCREAMING_SNAKE_CASE :Optional[int] = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) __SCREAMING_SNAKE_CASE :Any = [] __SCREAMING_SNAKE_CASE :int = [] __SCREAMING_SNAKE_CASE :str = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} __SCREAMING_SNAKE_CASE :Any = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results", '''emoji''': True, }, } ] __SCREAMING_SNAKE_CASE :List[str] = 0 for log in Path().glob('''*.log'''): __SCREAMING_SNAKE_CASE :Optional[int] = 0 with open(log, '''r''') as f: for line in f: __SCREAMING_SNAKE_CASE :Tuple = json.loads(line) if line.get('''nodeid''', '''''') != "": __SCREAMING_SNAKE_CASE :Optional[int] = line['''nodeid'''] if line.get('''duration''', None) is not None: __SCREAMING_SNAKE_CASE :List[str] = F"{line['duration']:.4f}" if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) __SCREAMING_SNAKE_CASE :int = [] log.unlink() __SCREAMING_SNAKE_CASE :Any = '''''' __SCREAMING_SNAKE_CASE :Union[str, Any] = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" __SCREAMING_SNAKE_CASE :int = [] __SCREAMING_SNAKE_CASE :Any = {} for test in failed_tests: __SCREAMING_SNAKE_CASE :Any = test[0].split('''::''') __SCREAMING_SNAKE_CASE :Optional[Any] = data[0].split('''/''')[-1] if data[0] not in filesafailed: __SCREAMING_SNAKE_CASE :List[Any] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) __SCREAMING_SNAKE_CASE :Dict = [test[0] for test in failed_table] __SCREAMING_SNAKE_CASE :int = list(set(files)) # Count number of instances in failed_tests __SCREAMING_SNAKE_CASE :List[Any] = [] for file in individual_files: table.append([file, len(filesafailed[file])]) __SCREAMING_SNAKE_CASE :int = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3000: __SCREAMING_SNAKE_CASE :Union[str, Any] = '''Too many failed tests, please see the full report in the Action results.''' __SCREAMING_SNAKE_CASE :List[Any] = len(err) + 10 __SCREAMING_SNAKE_CASE :List[str] = message[: 3000 - offset] + F"\n...\n```\n{err}" print(F"### {message}") else: __SCREAMING_SNAKE_CASE :Optional[Any] = '''No failed tests! 🤗''' print(F"## {message}") payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient __SCREAMING_SNAKE_CASE :List[Any] = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 
🤗": __SCREAMING_SNAKE_CASE :Optional[Any] = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) __SCREAMING_SNAKE_CASE :str = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) __SCREAMING_SNAKE_CASE :List[Any] = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", } ], } payload.append(date_report) __SCREAMING_SNAKE_CASE :Dict = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) __SCREAMING_SNAKE_CASE :List[Any] = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name __SCREAMING_SNAKE_CASE :Tuple = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: __SCREAMING_SNAKE_CASE :Any = row[0] else: __SCREAMING_SNAKE_CASE :Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE :Dict = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```", }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
22
"""simple docstring""" import re from filelock import FileLock try: import nltk A__ : Any = True except (ImportError, ModuleNotFoundError): A__ : str = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _snake_case ( lowerCamelCase__ : str ) -> str: re.sub("<n>" , "" , lowerCamelCase__ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(lowerCamelCase__ ) )
144
0
'''simple docstring''' import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase_ : def __init__( self : Union[str, Any] , _A : Optional[int] , _A : List[str]=13 , _A : Union[str, Any]=30 , _A : int=2 , _A : List[str]=3 , _A : int=True , _A : Optional[Any]=True , _A : Optional[Any]=32 , _A : Optional[Any]=5 , _A : List[Any]=4 , _A : List[str]=37 , _A : Optional[Any]="gelu" , _A : Optional[int]=0.1 , _A : Optional[Any]=0.1 , _A : Union[str, Any]=10 , _A : Dict=0.0_2 , _A : Dict=None , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Tuple = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : str = is_training UpperCAmelCase__ : Optional[int] = use_labels UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : Tuple = num_attention_heads UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = type_sequence_label_size UpperCAmelCase__ : Optional[Any] = initializer_range UpperCAmelCase__ : List[str] = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : int = (image_size // patch_size) ** 2 UpperCAmelCase__ : Union[str, Any] = num_patches + 1 def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels def lowercase_ ( self : Optional[int] ): '''simple docstring''' return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self : Tuple , _A : List[Any] , _A : Dict , _A : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = ViTMSNModel(config=_A ) model.to(_A ) model.eval() UpperCAmelCase__ : Tuple = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : Union[str, 
Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = ViTMSNForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : List[str] = model(_A , labels=_A ) print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' ) print('''Labels: {labels}''' ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : Union[str, Any] = ViTMSNForImageClassification(_A ) model.to(_A ) model.eval() UpperCAmelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Union[str, Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.prepare_config_and_inputs() UpperCAmelCase__ : List[Any] = config_and_inputs UpperCAmelCase__ : List[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( snake_case_ , snake_case_ , unittest.TestCase ): lowerCAmelCase__ = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () lowerCAmelCase__ = ( {'feature-extraction': ViTMSNModel, 'image-classification': ViTMSNForImageClassification} if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ViTMSNModelTester(self ) UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def lowercase_ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(_A ) UpperCAmelCase__ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase__ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : str = ViTMSNModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ) -> Optional[int]: 
UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def lowercase_ ( self : Tuple ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None @slow def lowercase_ ( self : int ): '''simple docstring''' torch.manual_seed(2 ) UpperCAmelCase__ : Optional[int] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(_A ) UpperCAmelCase__ : Dict = self.default_image_processor UpperCAmelCase__ : Union[str, Any] = prepare_img() UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''pt''' ).to(_A ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Dict = model(**_A ) # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _A ) UpperCAmelCase__ : Optional[int] = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
356
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
0
"""simple docstring""" import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] =ComputeEnvironment.AMAZON_SAGEMAKER lowerCamelCase : str =True lowerCamelCase : Union[str, Any] ="""ml.p3.2xlarge""" lowerCamelCase : str ="""accelerate_sagemaker_execution_role""" lowerCamelCase : int ="""hf-sm""" lowerCamelCase : int ="""us-east-1""" lowerCamelCase : Tuple =1 lowerCamelCase : Any ="""accelerate-sagemaker-1""" lowerCamelCase : str ="""1.6""" lowerCamelCase : Tuple ="""4.4""" lowerCamelCase : Optional[int] ="""train.py""" lowerCamelCase : Optional[Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] lowerCamelCase : Union[str, Any] =[ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> List[str]: # If no defaults are changed, `to_kwargs` returns an empty dict. a : str = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args["model_name_or_path"] , lowerCAmelCase__ ) assert isinstance(converted_args["do_train"] , lowerCAmelCase__ ) assert isinstance(converted_args["epochs"] , lowerCAmelCase__ ) assert isinstance(converted_args["learning_rate"] , lowerCAmelCase__ ) assert isinstance(converted_args["max_steps"] , lowerCAmelCase__ ) with pytest.raises(lowerCAmelCase__ ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
105
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging A : Dict = logging.get_logger(__name__) def __lowerCamelCase ( __a :int=None , __a :Optional[Any]=None ) -> int: """simple docstring""" return field(default_factory=lambda: default , metadata=__a ) @dataclass class A : '''simple docstring''' __lowerCamelCase : List[str] = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __lowerCamelCase : List[int] = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __lowerCamelCase : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Benchmark training of model'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Verbose memory tracing'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Trace memory line by line'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save result to a CSV file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Save all print statements in a log file'''} ) __lowerCamelCase : bool = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Whether to print environment information'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __lowerCamelCase : str = field( default=F'''inference_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''inference_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __lowerCamelCase : str = field( default=F'''train_time_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''train_memory_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __lowerCamelCase : str = field( default=F'''env_info_{round(time() )}.csv''' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __lowerCamelCase : str = field( default=F'''log_{round(time() )}.csv''' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __lowerCamelCase : int = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __lowerCamelCase : bool = field( default=SCREAMING_SNAKE_CASE , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def a_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , __lowerCAmelCase , ) def a_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def a_ ( self : Tuple ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = ['bert-base-cased'].""" ) return self.models @property def a_ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
274
0
"""simple docstring""" import baseaa def snake_case__ ( __lowerCamelCase : str ): """simple docstring""" return baseaa.baaencode(string.encode('''utf-8''' ) ) def snake_case__ ( __lowerCamelCase : bytes ): """simple docstring""" return baseaa.baadecode(__lowerCamelCase ).decode('''utf-8''' ) if __name__ == "__main__": _lowercase : str = "Hello World!" _lowercase : Any = baseaa_encode(test) print(encoded) _lowercase : Dict = baseaa_decode(encoded) print(decoded)
355
"""simple docstring""" import unittest import numpy as np import torch from diffusers import VersatileDiffusionImageVariationPipeline from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device _lowercase : Tuple = False class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' pass @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def snake_case ( self : List[Any] )-> Dict: lowerCamelCase__ : str =VersatileDiffusionImageVariationPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ) pipe.to(lowerCamelCase ) pipe.set_progress_bar_config(disable=lowerCamelCase ) lowerCamelCase__ : int =load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCamelCase__ : Dict =torch.manual_seed(0 ) lowerCamelCase__ : str =pipe( image=lowerCamelCase, generator=lowerCamelCase, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''', ).images lowerCamelCase__ : Dict =image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowerCamelCase__ : List[Any] =np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
272
0
import functools import gc import inspect import torch from .imports import is_npu_available, is_xpu_available def _a ( *SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): __lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE ) for i in range(len(SCREAMING_SNAKE_CASE ) ): __lowerCAmelCase: List[Any] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() return objects def _a ( SCREAMING_SNAKE_CASE : Exception ) -> bool: """simple docstring""" __lowerCAmelCase: Tuple = [ 'CUDA out of memory.', # CUDA OOM 'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU 'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM ] if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(exception.args ) == 1: return any(err in exception.args[0] for err in _statements ) return False def _a ( SCREAMING_SNAKE_CASE : callable = None , SCREAMING_SNAKE_CASE : int = 1_28 ) -> Union[str, Any]: """simple docstring""" if function is None: return functools.partial(SCREAMING_SNAKE_CASE , starting_batch_size=SCREAMING_SNAKE_CASE ) __lowerCAmelCase: Dict = starting_batch_size def decorator(*SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() __lowerCAmelCase: str = list(inspect.signature(SCREAMING_SNAKE_CASE ).parameters.keys() ) # Guard against user error if len(SCREAMING_SNAKE_CASE ) < (len(SCREAMING_SNAKE_CASE ) + 1): __lowerCAmelCase: Dict = ', '.join([f'''{arg}={value}''' for arg, value in zip(params[1:] , args[1:] )] ) raise TypeError( f'''Batch size was passed into `{function.__name__}` as the first argument when called.''' f'''Remove this as the decorator already does so: `{function.__name__}({arg_str})`''' ) while True: if batch_size == 0: raise RuntimeError('No executable batch size found, reached zero.' ) try: return function(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) except Exception as e: if should_reduce_batch_size(SCREAMING_SNAKE_CASE ): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
322
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _a ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: Union[str, Any] = int(SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[str] = t // 36_00, (t // 60) % 60, t % 60 return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}''' def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=3_00 ) -> int: """simple docstring""" return f''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def _a ( SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase: List[str] = '<table border="1" class="dataframe">\n' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: __lowerCAmelCase: List[Any] = f'''{elt:.6f}''' if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(SCREAMING_SNAKE_CASE ) html_code += f''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class A_ : _lowercase : str = 5 _lowercase : str = 0.2 def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , UpperCAmelCase : int = 3_0_0 , ) -> List[Any]: __lowerCAmelCase: List[str] = total __lowerCAmelCase: Optional[int] = '' if prefix is None else prefix __lowerCAmelCase: int = leave __lowerCAmelCase: List[str] = parent __lowerCAmelCase: Optional[Any] = width __lowerCAmelCase: List[str] = None __lowerCAmelCase: Dict = None __lowerCAmelCase: List[str] = None def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : bool = False , UpperCAmelCase : str = None ) -> Optional[int]: __lowerCAmelCase: int = value if comment is not None: __lowerCAmelCase: Any = comment if self.last_value is None: __lowerCAmelCase: List[Any] = time.time() __lowerCAmelCase: Any = value __lowerCAmelCase: List[str] = None __lowerCAmelCase: Dict = self.warmup __lowerCAmelCase: List[str] = 1 self.update_bar(UpperCAmelCase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 __lowerCAmelCase: Union[str, Any] = time.time() __lowerCAmelCase: str = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: __lowerCAmelCase: Dict = self.elapsed_time / (value - self.start_value) else: __lowerCAmelCase: int = None if value >= self.total: __lowerCAmelCase: Any = self.total __lowerCAmelCase: str = None if not self.leave: self.close() elif self.average_time_per_item is not None: __lowerCAmelCase: List[str] = self.average_time_per_item * (self.total - value) self.update_bar(UpperCAmelCase ) __lowerCAmelCase: Tuple = value __lowerCAmelCase: int = current_time if self.average_time_per_item is None: __lowerCAmelCase: Optional[int] = 1 else: __lowerCAmelCase: Optional[Any] = max(int(self.update_every / self.average_time_per_item ) , 1 ) def UpperCAmelCase ( self : int , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]: __lowerCAmelCase: int = ' ' * (len(str(self.total ) ) - len(str(UpperCAmelCase ) )) + str(UpperCAmelCase ) if self.elapsed_time is None: __lowerCAmelCase: Dict = F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: __lowerCAmelCase: str = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: __lowerCAmelCase: Any = ( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' F''' {format_time(self.predicted_remaining )}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]''' self.display() def UpperCAmelCase ( self : Any ) -> Optional[Any]: __lowerCAmelCase: Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: __lowerCAmelCase: Tuple = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCAmelCase ( self : str ) -> Optional[Any]: if self.parent is None and self.output is not None: self.output.update(disp.HTML('' ) ) class A_ ( snake_case__ ): def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any]=None ) -> Any: super().__init__(UpperCAmelCase ) __lowerCAmelCase: Tuple = None if column_names is None else [column_names] __lowerCAmelCase: Union[str, Any] = None def UpperCAmelCase ( self : Union[str, Any] ) -> Any: __lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: __lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code ) , display_id=UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def UpperCAmelCase ( self : Tuple , UpperCAmelCase : List[Any] ) -> Dict: if self.inner_table is None: __lowerCAmelCase: List[str] = [list(values.keys() ), list(values.values() )] else: __lowerCAmelCase: Any = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(UpperCAmelCase ) __lowerCAmelCase: List[Any] = columns self.inner_table.append([values[c] for c in columns] ) def UpperCAmelCase ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=3_0_0 ) -> List[Any]: __lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCAmelCase , 
prefix=UpperCAmelCase , parent=self , width=UpperCAmelCase ) return self.child_bar def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]: __lowerCAmelCase: Tuple = None self.display() class A_ ( snake_case__ ): def __init__( self : Any ) -> List[str]: __lowerCAmelCase: int = None __lowerCAmelCase: Optional[int] = None __lowerCAmelCase: str = False def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , **UpperCAmelCase : Tuple ) -> str: __lowerCAmelCase: Tuple = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step' __lowerCAmelCase: Optional[int] = 0 __lowerCAmelCase: Any = 0 __lowerCAmelCase: Tuple = [self.first_column] + ['Training Loss'] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('Validation Loss' ) __lowerCAmelCase: List[Any] = NotebookTrainingTracker(state.max_steps , UpperCAmelCase ) def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> Any: __lowerCAmelCase: Union[str, Any] = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) __lowerCAmelCase: Any = False def UpperCAmelCase ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Dict ) -> List[Any]: if not has_length(UpperCAmelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: __lowerCAmelCase: int = self.training_tracker.add_child(len(UpperCAmelCase ) ) else: __lowerCAmelCase: List[str] = NotebookProgressBar(len(UpperCAmelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ) -> Union[str, Any]: if self.prediction_bar is not None: self.prediction_bar.close() __lowerCAmelCase: Any = None def UpperCAmelCase ( self : str , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=None , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]: # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: __lowerCAmelCase: Union[str, Any] = {'Training Loss': logs['loss']} # First column is necessarily Step sine we're not in epoch eval strategy __lowerCAmelCase: Dict = state.global_step self.training_tracker.write_line(UpperCAmelCase ) def UpperCAmelCase ( self : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple=None , **UpperCAmelCase : int ) -> List[str]: if self.training_tracker is not None: __lowerCAmelCase: Dict = {'Training Loss': 'No log', 'Validation Loss': 'No log'} for log in reversed(state.log_history ): if "loss" in log: __lowerCAmelCase: List[str] = log['loss'] break if self.first_column == "Epoch": __lowerCAmelCase: int = int(state.epoch ) else: __lowerCAmelCase: Tuple = state.global_step __lowerCAmelCase: Optional[int] = 'eval' for k in metrics: if k.endswith('_loss' ): __lowerCAmelCase: Union[str, Any] = re.sub(R'\_loss$' , '' , UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = metrics.pop('total_flos' , 
UpperCAmelCase ) __lowerCAmelCase: str = metrics.pop('epoch' , UpperCAmelCase ) __lowerCAmelCase: int = metrics.pop(F'''{metric_key_prefix}_runtime''' , UpperCAmelCase ) __lowerCAmelCase: List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , UpperCAmelCase ) __lowerCAmelCase: List[str] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , UpperCAmelCase ) __lowerCAmelCase: Tuple = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , UpperCAmelCase ) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': __lowerCAmelCase: Tuple = v else: __lowerCAmelCase: int = k.split('_' ) __lowerCAmelCase: List[Any] = ' '.join([part.capitalize() for part in splits[1:]] ) __lowerCAmelCase: List[Any] = v self.training_tracker.write_line(UpperCAmelCase ) self.training_tracker.remove_child() __lowerCAmelCase: List[str] = None # Evaluation takes a long time so we should force the next update. __lowerCAmelCase: str = True def UpperCAmelCase ( self : int , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> Optional[int]: self.training_tracker.update( state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=UpperCAmelCase ) __lowerCAmelCase: Union[str, Any] = None
322
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
359
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : Tuple = { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json', # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = '''convbert''' def __init__(self : str , _UpperCAmelCase : Union[str, Any]=3_0522 , _UpperCAmelCase : Dict=768 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : str=3072 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : str=512 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Tuple=1E-1_2 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Optional[Any]=0 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : str=768 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Optional[Any]=9 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : List[Any] , ) -> List[str]: """simple docstring""" super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = embedding_size lowercase__ = head_ratio lowercase__ = conv_kernel_size lowercase__ = num_groups lowercase__ = classifier_dropout class A ( UpperCAmelCase__ ): '''simple docstring''' @property def lowerCamelCase__ (self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: """batch""", 1: """choice""", 2: """sequence"""} else: lowercase__ = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
146
0
"""simple docstring""" def _lowercase ( __snake_case ,__snake_case ) -> int: return int((input_a, input_a).count(1 ) != 0 ) def _lowercase ( ) -> None: assert or_gate(0 ,0 ) == 0 assert or_gate(0 ,1 ) == 1 assert or_gate(1 ,0 ) == 1 assert or_gate(1 ,1 ) == 1 if __name__ == "__main__": print(or_gate(0, 1)) print(or_gate(1, 0)) print(or_gate(0, 0)) print(or_gate(1, 1))
269
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class A__ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> str: """simple docstring""" __lowerCAmelCase : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small") __lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("google/mt5-small") __lowerCAmelCase : Tuple = tokenizer("Hello there" , return_tensors="np").input_ids __lowerCAmelCase : Dict = tokenizer("Hi I am" , return_tensors="np").input_ids __lowerCAmelCase : str = shift_tokens_right(_SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id) __lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , decoder_input_ids=_SCREAMING_SNAKE_CASE).logits __lowerCAmelCase : int = optax.softmax_cross_entropy(_SCREAMING_SNAKE_CASE , onehot(_SCREAMING_SNAKE_CASE , logits.shape[-1])).mean() __lowerCAmelCase : List[str] = -(labels.shape[-1] * loss.item()) __lowerCAmelCase : str = -84.9127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
269
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __SCREAMING_SNAKE_CASE : List[Any] = { '''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''], '''processing_layoutlmv2''': ['''LayoutLMv2Processor'''], '''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = ['''LayoutLMv2TokenizerFast'''] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Any = ['''LayoutLMv2FeatureExtractor'''] __SCREAMING_SNAKE_CASE : Optional[Any] = ['''LayoutLMv2ImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : str = [ '''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv2ForQuestionAnswering''', '''LayoutLMv2ForSequenceClassification''', '''LayoutLMv2ForTokenClassification''', '''LayoutLMv2Layer''', '''LayoutLMv2Model''', '''LayoutLMv2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
370
"""simple docstring""" import os import sys __SCREAMING_SNAKE_CASE : Dict = os.path.join(os.path.dirname(__file__), '''src''') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __SCREAMING_SNAKE_CASE : List[Any] = [ '''torch''', '''numpy''', '''tokenizers''', '''filelock''', '''requests''', '''tqdm''', '''regex''', '''sentencepiece''', '''sacremoses''', '''importlib_metadata''', '''huggingface_hub''', ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCAmelCase_( *lowercase_ : Any , **lowercase_ : Optional[Any] ) -> Optional[Any]: return AutoConfig.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCAmelCase_( *lowercase_ : List[Any] , **lowercase_ : List[Any] ) -> Tuple: return AutoTokenizer.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCAmelCase_( *lowercase_ : Optional[Any] , **lowercase_ : Optional[Any] ) -> int: return AutoModel.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCAmelCase_( *lowercase_ : Dict , **lowercase_ : List[Any] ) -> int: return AutoModelForCausalLM.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCAmelCase_( *lowercase_ : str , **lowercase_ : List[Any] ) -> Any: return AutoModelForMaskedLM.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCAmelCase_( *lowercase_ : Tuple , **lowercase_ : List[Any] ) -> Dict: return AutoModelForSequenceClassification.from_pretrained(*lowercase_ , **lowercase_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCAmelCase_( *lowercase_ : List[str] , **lowercase_ : List[str] ) -> str: return AutoModelForQuestionAnswering.from_pretrained(*lowercase_ , **lowercase_ )
73
0
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class __magic_name__ ( _UpperCamelCase ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('''>=''', '''0.0.12''') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and 
is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class __magic_name__ ( _UpperCamelCase ): """simple docstring""" __UpperCamelCase = 42 __UpperCamelCase = 42 from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
300
"""simple docstring""" A = 9.80665 def __A ( a_ :float , a_ :float , a_ :float = g) -> float: if fluid_density <= 0: raise ValueError('''Impossible fluid density''') if volume < 0: raise ValueError('''Impossible Object volume''') if gravity <= 0: raise ValueError('''Impossible Gravity''') return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
160
0
from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE_:str = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE__ ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ): '''simple docstring''' def __init__( self, *lowerCamelCase__, **lowerCamelCase__ ): super().__init__(*lowerCamelCase__, **lowerCamelCase__ ) requires_backends(self, """decord""" ) self.check_model_type(lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=None ): A : List[Any] = {} if frame_sampling_rate is not None: A : Union[str, Any] = frame_sampling_rate if num_frames is not None: A : str = num_frames A : Tuple = {} if top_k is not None: A : Dict = top_k return preprocess_params, {}, postprocess_params def __call__( self, lowerCamelCase__, **lowerCamelCase__ ): return super().__call__(lowerCamelCase__, **lowerCamelCase__ ) def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=None, lowerCamelCase__=1 ): if num_frames is None: A : List[Any] = self.model.config.num_frames if video.startswith("""http://""" ) or video.startswith("""https://""" ): A : Any = BytesIO(requests.get(lowerCamelCase__ ).content ) A : Any = VideoReader(lowerCamelCase__ ) videoreader.seek(0 ) A : int = 0 A : Optional[int] = num_frames * frame_sampling_rate - 1 A : Optional[Any] = np.linspace(lowerCamelCase__, lowerCamelCase__, num=lowerCamelCase__, dtype=np.intaa ) A : List[Any] = videoreader.get_batch(lowerCamelCase__ ).asnumpy() A : Dict = list(lowerCamelCase__ ) A : Union[str, Any] = self.image_processor(lowerCamelCase__, return_tensors=self.framework ) return model_inputs def _lowerCAmelCase ( self, lowerCamelCase__ ): A : Optional[Any] = self.model(**lowerCamelCase__ ) return model_outputs def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=5 ): if top_k > self.model.config.num_labels: A : int = self.model.config.num_labels if self.framework == "pt": A : List[Any] = model_outputs.logits.softmax(-1 )[0] A : Union[str, Any] = probs.topk(lowerCamelCase__ ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) A : Optional[int] = scores.tolist() A : List[str] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase__, lowerCamelCase__ )]
370
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ): A : str = tempfile.mkdtemp() A : Any = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] A : Any = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file, """w""", encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) A : Any = { """do_resize""": True, """size""": {"""height""": 224, """width""": 224}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073], """image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711], """do_convert_rgb""": True, } A : int = os.path.join(self.tmpdirname, lowerCamelCase__ ) with open(self.image_processor_file, """w""", encoding="""utf-8""" ) as fp: json.dump(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase__ ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCamelCase__ ) def _lowerCAmelCase ( self, **lowerCamelCase__ ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **lowerCamelCase__ ) def _lowerCAmelCase ( self ): shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ): A : Optional[int] = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )] A : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase__, 0, -1 ) ) for x in image_inputs] return image_inputs def _lowerCAmelCase ( self ): A : List[Any] = self.get_tokenizer() A : Dict = self.get_rust_tokenizer() A : List[Any] = self.get_image_processor() A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) A : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCamelCase__ ) A : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) A : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer, lowerCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer, lowerCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor, lowerCamelCase__ ) 
self.assertIsInstance(processor_fast.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Optional[int] = self.get_tokenizer(cls_token="""(CLS)""", sep_token="""(SEP)""" ) A : Optional[int] = self.get_image_processor(do_normalize=lowerCamelCase__ ) A : Optional[int] = ChineseCLIPProcessor.from_pretrained( self.tmpdirname, cls_token="""(CLS)""", sep_token="""(SEP)""", do_normalize=lowerCamelCase__ ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer, lowerCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : Tuple = self.get_image_processor() A : List[str] = self.get_tokenizer() A : List[str] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = self.prepare_image_inputs() A : Union[str, Any] = image_processor(lowerCamelCase__, return_tensors="""np""" ) A : str = processor(images=lowerCamelCase__, return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : Tuple = self.get_tokenizer() A : str = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : Any = """Alexandra,T-shirt的价格是15便士。""" A : Optional[Any] = processor(text=lowerCamelCase__ ) A : int = tokenizer(lowerCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key] ) def _lowerCAmelCase ( self ): A : Dict = self.get_image_processor() A : List[str] = self.get_tokenizer() A : int = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : str = """Alexandra,T-shirt的价格是15便士。""" A : Dict = self.prepare_image_inputs() A : Optional[int] = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase__ ): processor() def _lowerCAmelCase ( self ): A : Union[str, Any] = self.get_image_processor() A : List[str] = self.get_tokenizer() A : List[str] = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : List[Any] = processor.batch_decode(lowerCamelCase__ ) A : str = tokenizer.batch_decode(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__, lowerCamelCase__ ) def _lowerCAmelCase ( self ): A : List[str] = self.get_image_processor() A : List[str] = self.get_tokenizer() A : Any = ChineseCLIPProcessor(tokenizer=lowerCamelCase__, image_processor=lowerCamelCase__ ) A : List[Any] = """Alexandra,T-shirt的价格是15便士。""" A : Optional[Any] = self.prepare_image_inputs() A : str = processor(text=lowerCamelCase__, images=lowerCamelCase__ ) self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
115
0
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
317
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _a : Union[str, Any]= logging.get_logger(__name__) _a : str= { "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json", } class UpperCamelCase ( lowercase ): UpperCAmelCase : List[str] = """mgp-str""" def __init__(self : List[Any] , _A : Dict=[32, 1_28] , _A : Any=4 , _A : int=3 , _A : Any=27 , _A : List[str]=38 , _A : str=5_02_57 , _A : Optional[int]=3_05_22 , _A : Union[str, Any]=7_68 , _A : Tuple=12 , _A : List[str]=12 , _A : List[str]=4.0 , _A : Optional[int]=True , _A : Optional[Any]=False , _A : Dict=1E-5 , _A : Optional[int]=0.0 , _A : str=0.0 , _A : int=0.0 , _A : str=False , _A : List[Any]=0.02 , **_A : Union[str, Any] , ) -> Tuple: super().__init__(**_A) __snake_case : Union[str, Any] = image_size __snake_case : Optional[int] = patch_size __snake_case : int = num_channels __snake_case : int = max_token_length __snake_case : List[Any] = num_character_labels __snake_case : Optional[int] = num_bpe_labels __snake_case : Optional[Any] = num_wordpiece_labels __snake_case : int = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Any = mlp_ratio __snake_case : List[str] = distilled __snake_case : List[Any] = layer_norm_eps __snake_case : List[Any] = drop_rate __snake_case : Optional[int] = qkv_bias __snake_case : Optional[int] = attn_drop_rate __snake_case : int = drop_path_rate __snake_case : List[str] = output_aa_attentions __snake_case : Optional[Any] = initializer_range
172
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { 'configuration_blip_2': [ 'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig', ], 'processing_blip_2': ['Blip2Processor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Blip2Model', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2VisionModel', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _a = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine' def _A ( ) -> Tuple: '''simple docstring''' __lowercase = _ask_options( "In which compute environment are you running?", ["This machine", "AWS (Amazon SageMaker)"], _convert_compute_environment, ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: __lowercase = get_sagemaker_input() else: __lowercase = get_cluster_input() return config def _A ( UpperCamelCase_ : Union[str, Any]=None) -> Union[str, Any]: '''simple docstring''' if subparsers is not None: __lowercase = subparsers.add_parser("config", description=UpperCamelCase_) else: __lowercase = argparse.ArgumentParser("Accelerate config command", description=UpperCamelCase_) parser.add_argument( "--config_file", default=UpperCamelCase_, help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ), ) if subparsers is not None: parser.set_defaults(func=UpperCamelCase_) return parser def _A ( UpperCamelCase_ : Dict) -> str: '''simple docstring''' __lowercase = get_user_input() if args.config_file is not None: __lowercase = args.config_file else: if not os.path.isdir(UpperCamelCase_): os.makedirs(UpperCamelCase_) __lowercase = default_yaml_config_file if config_file.endswith(".json"): config.to_json_file(UpperCamelCase_) else: config.to_yaml_file(UpperCamelCase_) print(F"""accelerate configuration saved at {config_file}""") def _A ( ) -> Optional[Any]: '''simple docstring''' __lowercase = config_command_parser() __lowercase = parser.parse_args() config_command(UpperCamelCase_) if __name__ == "__main__": main()
144
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { "configuration_blip_2": [ "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Blip2Config", "Blip2QFormerConfig", "Blip2VisionConfig", ], "processing_blip_2": ["Blip2Processor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Blip2Model", "Blip2QFormerModel", "Blip2PreTrainedModel", "Blip2ForConditionalGeneration", "Blip2VisionModel", ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
191
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "microsoft/beit-base-patch16-224-pt22k": ( "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json" ), # See all BEiT models at https://huggingface.co/models?filter=beit } class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : str = '''beit''' def __init__( self ,SCREAMING_SNAKE_CASE__=81_92 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-12 ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=16 ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=0.1 ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=[3, 5, 7, 11] ,SCREAMING_SNAKE_CASE__=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=0.4 ,SCREAMING_SNAKE_CASE__=2_56 ,SCREAMING_SNAKE_CASE__=1 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=2_55 ,**SCREAMING_SNAKE_CASE__ ,) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Any = vocab_size __SCREAMING_SNAKE_CASE :Dict = hidden_size __SCREAMING_SNAKE_CASE :Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE :List[str] = num_attention_heads __SCREAMING_SNAKE_CASE :Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE :Optional[int] = hidden_act __SCREAMING_SNAKE_CASE :Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE :Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE :Optional[Any] = initializer_range __SCREAMING_SNAKE_CASE :str = layer_norm_eps __SCREAMING_SNAKE_CASE :int = image_size __SCREAMING_SNAKE_CASE :Tuple = patch_size __SCREAMING_SNAKE_CASE :Any = num_channels __SCREAMING_SNAKE_CASE :Any = use_mask_token __SCREAMING_SNAKE_CASE :Union[str, Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE :Union[str, Any] = use_relative_position_bias __SCREAMING_SNAKE_CASE :Union[str, Any] = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE :List[str] = layer_scale_init_value __SCREAMING_SNAKE_CASE :Optional[Any] = drop_path_rate __SCREAMING_SNAKE_CASE :str = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE :Dict = out_indices __SCREAMING_SNAKE_CASE :Optional[int] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE :Optional[int] = use_auxiliary_head __SCREAMING_SNAKE_CASE :Union[str, Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE :Dict = auxiliary_channels __SCREAMING_SNAKE_CASE :Optional[int] = auxiliary_num_convs __SCREAMING_SNAKE_CASE :List[str] = auxiliary_concat_input __SCREAMING_SNAKE_CASE :List[Any] = semantic_loss_ignore_index class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : List[Any] = version.parse('''1.11''' ) @property def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _UpperCamelCase ( self ) -> float: """simple docstring""" 
return 1E-4
191
1
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger UpperCAmelCase__ = get_logger(__name__) UpperCAmelCase__ = Path(__file__).parent / "model_card_template.md" UpperCAmelCase__ = uuida().hex UpperCAmelCase__ = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES UpperCAmelCase__ = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES UpperCAmelCase__ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" def _a ( a :Union[Dict, str, None] = None ) -> str: a = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F"""; torch/{_torch_version}""" if is_flax_available(): ua += F"""; jax/{_jax_version}""" ua += F"""; flax/{_flax_version}""" if is_onnx_available(): ua += F"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(a , a ): ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(a , a ): ua += "; " + user_agent return ua def _a ( a :str , a :Optional[str] = None , a :Optional[str] = None ) -> Optional[Any]: if token is None: a = HfFolder.get_token() if organization is None: a = whoami(a )['''name'''] return F"""{username}/{model_id}""" else: return F"""{organization}/{model_id}""" def _a ( a :Optional[Any] , a :str ) -> str: if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(a , '''local_rank''' ) and args.local_rank not in [-1, 0]: return a = args.hub_token if hasattr(a , '''hub_token''' ) else None a = get_full_repo_name(a , token=a ) a = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=a , model_name=a , repo_name=a , dataset_name=args.dataset_name if hasattr(a , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(a , '''gradient_accumulation_steps''' ) else None ) , adam_betaa=args.adam_betaa if hasattr(a , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(a , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(a , '''adam_weight_decay''' 
) else None , adam_epsilon=args.adam_epsilon if hasattr(a , '''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(a , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(a , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(a , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(a , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(a , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , ) a = os.path.join(args.output_dir , '''README.md''' ) model_card.save(a ) def _a ( a :Optional[str] , a :Optional[str] = None ) -> int: if resolved_file is None or commit_hash is not None: return commit_hash a = str(Path(a ).as_posix() ) a = re.search(r'''snapshots/([^/]+)/''' , a ) if search is None: return None a = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(a ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. UpperCAmelCase__ = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) UpperCAmelCase__ = os.path.join(hf_cache_home, "diffusers") def _a ( a :Optional[str] = None , a :Optional[str] = None ) -> None: if new_cache_dir is None: a = DIFFUSERS_CACHE if old_cache_dir is None: a = old_diffusers_cache a = Path(a ).expanduser() a = Path(a ).expanduser() for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): a = new_cache_dir / old_blob_path.relative_to(a ) new_blob_path.parent.mkdir(parents=a , exist_ok=a ) os.replace(a , a ) try: os.symlink(a , a ) except OSError: logger.warning( '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). UpperCAmelCase__ = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): UpperCAmelCase__ = 0 else: with open(cache_version_file) as f: try: UpperCAmelCase__ = int(f.read()) except ValueError: UpperCAmelCase__ = 0 if cache_version < 1: UpperCAmelCase__ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." ) try: move_cache() except Exception as e: UpperCAmelCase__ = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ "the directory exists and can be written to." 
) def _a ( a :str , a :Optional[str] = None ) -> str: if variant is not None: a = weights_name.split('''.''' ) a = splits[:-1] + [variant] + splits[-1:] a = '''.'''.join(a ) return weights_name def _a ( a :Dict , *, a :Tuple , a :Any , a :Any , a :Dict , a :str , a :List[str] , a :List[str] , a :Optional[int] , a :Any , a :List[Any] , a :Optional[Any]=None , ) -> Any: a = str(a ) if os.path.isfile(a ): return pretrained_model_name_or_path elif os.path.isdir(a ): if os.path.isfile(os.path.join(a , a ) ): # Load from a PyTorch checkpoint a = os.path.join(a , a ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(a , a , a ) ): a = os.path.join(a , a , a ) return model_file else: raise EnvironmentError( F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(a ).base_version ) >= version.parse('''0.20.0''' ) ): try: a = hf_hub_download( a , filename=_add_variant(a , a ) , cache_dir=a , force_download=a , proxies=a , resume_download=a , local_files_only=a , use_auth_token=a , user_agent=a , subfolder=a , revision=revision or commit_hash , ) warnings.warn( F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , a , ) return model_file except: # noqa: E722 warnings.warn( F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(a , a )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(a , a )}' so that the correct variant file can be added.""" , a , ) try: # 2. Load model file as usual a = hf_hub_download( a , filename=a , cache_dir=a , force_download=a , proxies=a , resume_download=a , local_files_only=a , use_auth_token=a , user_agent=a , subfolder=a , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ''' '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ''' '''login`.''' ) except RevisionNotFoundError: raise EnvironmentError( F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ '''this model name. 
Check the model page at ''' F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it""" F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" F""" directory containing a file named {weights_name} or""" ''' \nCheckout your internet connection or see how to run the library in''' ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' ) except EnvironmentError: raise EnvironmentError( F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """ '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ''' F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """ F"""containing a file named {weights_name}""" )
26
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5") def _a ( a :Optional[Any] , a :Tuple , a :Dict ) -> List[str]: hf_model.apply_weight_norm() a = checkpoint['''input_conv.weight_g'''] a = checkpoint['''input_conv.weight_v'''] a = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): a = checkpoint[F"""upsamples.{i}.1.weight_g"""] a = checkpoint[F"""upsamples.{i}.1.weight_v"""] a = checkpoint[F"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""] a = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""] a = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""] a = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""] a = checkpoint['''output_conv.1.weight_g'''] a = checkpoint['''output_conv.1.weight_v'''] a = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def _a ( a :List[str] , a :Union[str, Any] , a :Dict , a :Dict=None , a :List[Any]=None , ) -> int: if config_path is not None: a = SpeechTaHifiGanConfig.from_pretrained(a ) else: a = SpeechTaHifiGanConfig() a = SpeechTaHifiGan(a ) a = torch.load(a ) load_weights(orig_checkpoint['''model''']['''generator'''] , a , a ) a = np.load(a ) a = stats[0].reshape(-1 ) a = stats[1].reshape(-1 ) a = torch.from_numpy(a ).float() a = torch.from_numpy(a ).float() model.save_pretrained(a ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(a ) if __name__ == "__main__": UpperCAmelCase__ = argparse.ArgumentParser() parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint") parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) UpperCAmelCase__ = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
26
1
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( __a ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( __a ) -> Tuple: """simple docstring""" lowerCamelCase__: List[str] =np.max(_outputs , axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) lowerCamelCase__: List[Any] =np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_SCREAMING_SNAKE_CASE ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = """sigmoid""" lowercase_ = """softmax""" lowercase_ = """none""" @add_end_docstrings( __SCREAMING_SNAKE_CASE , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = False lowercase_ = ClassificationFunction.NONE def __init__(self : Optional[Any] , **UpperCAmelCase_ : Tuple) ->Any: '''simple docstring''' super().__init__(**__UpperCAmelCase) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : int="" , **UpperCAmelCase_ : Dict) ->int: '''simple docstring''' lowerCamelCase__: Optional[int] =tokenizer_kwargs lowerCamelCase__: List[Any] ={} if hasattr(self.model.config , "return_all_scores") and return_all_scores is None: lowerCamelCase__: List[Any] =self.model.config.return_all_scores if isinstance(__UpperCAmelCase , __UpperCAmelCase) or top_k is None: lowerCamelCase__: int =top_k lowerCamelCase__: Dict =False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." 
, __UpperCAmelCase , ) if return_all_scores: lowerCamelCase__: List[Any] =None else: lowerCamelCase__: Union[str, Any] =1 if isinstance(__UpperCAmelCase , __UpperCAmelCase): lowerCamelCase__: Union[str, Any] =ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowerCamelCase__: List[Any] =function_to_apply return preprocess_params, {}, postprocess_params def __call__(self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Union[str, Any] =super().__call__(*__UpperCAmelCase , **__UpperCAmelCase) # TODO try and retrieve it in a nicer way from _sanitize_parameters. lowerCamelCase__: Optional[Any] ='top_k' not in kwargs if isinstance(args[0] , __UpperCAmelCase) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int) ->str: '''simple docstring''' lowerCamelCase__: Dict =self.framework if isinstance(__UpperCAmelCase , __UpperCAmelCase): return self.tokenizer(**__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) elif isinstance(__UpperCAmelCase , __UpperCAmelCase) and len(__UpperCAmelCase) == 1 and isinstance(inputs[0] , __UpperCAmelCase) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) elif isinstance(__UpperCAmelCase , __UpperCAmelCase): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.") return self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any] , UpperCAmelCase_ : Optional[int]) ->str: '''simple docstring''' return self.model(**__UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : List[Any]=True) ->List[str]: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowerCamelCase__: str =ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowerCamelCase__: int =ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply") and function_to_apply is None: lowerCamelCase__: Optional[Any] =self.model.config.function_to_apply else: lowerCamelCase__: Dict =ClassificationFunction.NONE lowerCamelCase__: int =model_outputs['logits'][0] lowerCamelCase__: Union[str, Any] =outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowerCamelCase__: Dict =sigmoid(__UpperCAmelCase) elif function_to_apply == ClassificationFunction.SOFTMAX: lowerCamelCase__: int =softmax(__UpperCAmelCase) elif function_to_apply == ClassificationFunction.NONE: lowerCamelCase__: Tuple =outputs else: raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} 
lowerCamelCase__: Any =[ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(__UpperCAmelCase) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase_: x["score"] , reverse=__UpperCAmelCase) if top_k is not None: lowerCamelCase__: List[str] =dict_scores[:top_k] return dict_scores
10
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class _lowerCAmelCase ( yaml.SafeLoader ): """simple docstring""" def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :List[Any] = [self.constructed_objects[key_node] for key_node, _ in node.value] lowerCAmelCase__ :str = [tuple(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else key for key in keys] lowerCAmelCase__ :Optional[int] = Counter(__UpperCAmelCase ) lowerCAmelCase__ :int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = super().construct_mapping(__UpperCAmelCase , deep=__UpperCAmelCase ) self._check_no_duplicates_on_constructed_node(__UpperCAmelCase ) return mapping def __A (_SCREAMING_SNAKE_CASE ) ->Tuple[Optional[str], str]: """simple docstring""" lowerCAmelCase__ :Optional[Any] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowerCAmelCase__ :Optional[int] = full_content[1:].index('---' ) + 1 lowerCAmelCase__ :Union[str, Any] = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(_SCREAMING_SNAKE_CASE ) class _lowerCAmelCase ( a ): """simple docstring""" __magic_name__ :List[str] = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(__UpperCAmelCase ) else: return cls() def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if path.exists(): with open(__UpperCAmelCase , encoding='utf-8' ) as readme_file: lowerCAmelCase__ :Optional[Any] = readme_file.read() else: lowerCAmelCase__ :Union[str, Any] = None lowerCAmelCase__ :Union[str, Any] = self._to_readme(__UpperCAmelCase ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as readme_file: readme_file.write(__UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase = None ): '''simple docstring''' if readme_content is not None: lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = _split_yaml_from_readme(__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = '---\n' + self.to_yaml_string() + '---\n' + content else: lowerCAmelCase__ :str = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def snake_case ( cls , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Dict = yaml.load(__UpperCAmelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowerCAmelCase__ :int = { (key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' return yaml.safe_dump( { (key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=__UpperCAmelCase , allow_unicode=__UpperCAmelCase , encoding='utf-8' , ).decode('utf-8' ) __A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], 
"""automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], """question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser __A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") __A = ap.parse_args() __A = Path(args.readme_filepath) __A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
293
0
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
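A quick check of the factorization above; the expected outputs follow directly from the definition:

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2^3 * 3^2 * 5
assert prime_factors(97) == [97]                 # primes return themselves
assert prime_factors(1) == []                    # 1 has no prime factors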
218
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
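For illustration, this is how the `normalize_text` helper above behaves on a short transcript; the sample sentence is invented:

sample = "Hello,\nWORLD! How are you?"
print(normalize_text(sample))  # -> "hello world how are you"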
218
1
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
51
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Gaussian elimination with partial pivoting and back substitution."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [[round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit the lowest-degree polynomial through the points (1, y_1), (2, y_2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
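As a sanity check on `interpolate` above: fitting only the first two terms of the cube sequence gives a line whose first disagreement with n^3 is the "first incorrect term" the problem looks for. A small sketch:

cubes = [x**3 for x in range(1, 11)]               # u(n) = n^3
op2 = interpolate(cubes[:2])                       # fit the first two terms only -> p(n) = 7n - 6
assert [op2(n) for n in (1, 2, 3)] == [1, 8, 15]   # 15 is the first incorrect term (FIT)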
299
0
"""simple docstring""" def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : int ) -> float: '''simple docstring''' if principal <= 0: raise Exception("""Principal borrowed must be > 0""" ) if rate_per_annum < 0: raise Exception("""Rate of interest must be >= 0""" ) if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ): raise Exception("""Years to repay must be an integer > 0""" ) # Yearly rate is divided by 12 to get monthly rate lowerCAmelCase_ :Any = rate_per_annum / 1_2 # Years to repay is multiplied by 12 to get number of payments as payment is monthly lowerCAmelCase_ :Optional[int] = years_to_repay * 1_2 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __UpperCAmelCase = 16 __UpperCAmelCase = 32 def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str: '''simple docstring''' lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowercase__ : int ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase_ :Optional[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowercase__ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase_ :List[Any] = 1_6 elif accelerator.mixed_precision != "no": lowerCAmelCase_ :List[str] = 8 else: lowerCAmelCase_ :Optional[int] = None return tokenizer.pad( lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCAmelCase_ :Optional[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowerCAmelCase_ :List[Any] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __UpperCAmelCase = mocked_dataloaders # noqa: F811 def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1": lowerCAmelCase_ :Optional[Any] = 2 # New Code # lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps ) lowerCAmelCase_ :int = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase_ :str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase_ :int = config["""lr"""] lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] ) lowerCAmelCase_ :int = int(config["""seed"""] ) lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] ) lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) set_seed(lowercase__ ) lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() with LocalSGD( accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowercase__ ): lowerCAmelCase_ :str = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase_ :Optional[int] = model(**lowercase__ ) lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 ) lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowerCAmelCase_ :Any = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def _snake_case ( ) -> Tuple: '''simple docstring''' lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) # New Code # parser.add_argument( """--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , ) parser.add_argument( """--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) lowerCAmelCase_ :Optional[Any] = parser.parse_args() lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
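The LocalSGD pattern used in the training loop above, isolated into a minimal sketch; the model, optimizer, and dataloader are placeholders to be supplied by the caller:

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD


def train_with_local_sgd(accelerator: Accelerator, model, optimizer, dataloader, sgd_steps: int = 8):
    with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=sgd_steps, enabled=True) as local_sgd:
        for batch in dataloader:
            with accelerator.accumulate(model):
                loss = model(**batch).loss
                accelerator.backward(loss)
                optimizer.step()
                optimizer.zero_grad()
            local_sgd.step()  # parameters are synchronized every `sgd_steps` batches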
1
1
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (a string in the variable ``x``) from the initial guess `a`
    using the Newton-Raphson method."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
    # Find Square Root of 5
    print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
    # Exponential Roots
    print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
348
def z_function(input_str: str) -> list[int]:
    """For each index i, compute the length of the longest substring starting at i
    that matches a prefix of the string (the Z-array)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater then length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
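A small usage check for the functions above; `find_pattern` counts the two occurrences of "abr" in "abracadabra":

assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2  # matches at indices 0 and 7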
272
0
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase (_A ): """simple docstring""" return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def lowercase (_A , _A ): """simple docstring""" _lowerCAmelCase : List[str] = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _lowerCAmelCase : int = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' ) _lowerCAmelCase : str = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' ) _lowerCAmelCase : Optional[Any] = key.replace('heads.cmd.itm_head.cls' , 'itm_head' ) _lowerCAmelCase : str = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' ) _lowerCAmelCase : List[str] = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' ) _lowerCAmelCase : int = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' ) _lowerCAmelCase : str = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' ) _lowerCAmelCase : Dict = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' ) _lowerCAmelCase : int = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' ) _lowerCAmelCase : Optional[Any] = key.replace('image_encoder.module' , 'flava.image_model' ) _lowerCAmelCase : Tuple = key.replace('text_encoder.module' , 'flava.text_model' ) _lowerCAmelCase : int = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' ) _lowerCAmelCase : int = key.replace('mm_encoder.module' , 'flava.multimodal_model' ) _lowerCAmelCase : Optional[int] = key.replace('text_projection' , 'flava.text_projection' ) _lowerCAmelCase : Dict = key.replace('image_projection' , 'flava.image_projection' ) _lowerCAmelCase : Tuple = value.float() for key, value in codebook_state_dict.items(): _lowerCAmelCase : str = value return upgrade @torch.no_grad() def lowercase (_A , _A , _A , _A=None ): """simple docstring""" if config_path is not None: _lowerCAmelCase : Optional[Any] = FlavaConfig.from_pretrained(_A ) else: _lowerCAmelCase : Optional[int] = FlavaConfig() _lowerCAmelCase : Optional[int] = FlavaForPreTraining(_A ).eval() _lowerCAmelCase : str = convert_dalle_checkpoint(_A , _A , save_checkpoint=_A ) if os.path.exists(_A ): _lowerCAmelCase : Optional[Any] = torch.load(_A , map_location='cpu' ) else: _lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_A , map_location='cpu' ) _lowerCAmelCase : List[Any] = upgrade_state_dict(_A , _A ) hf_model.load_state_dict(_A ) _lowerCAmelCase : str = hf_model.state_dict() _lowerCAmelCase : str = count_parameters(_A ) _lowerCAmelCase : Any = count_parameters(_A ) + count_parameters(_A ) assert torch.allclose(_A , _A , atol=1E-3 ) hf_model.save_pretrained(_A ) if __name__ == "__main__": lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCAmelCase : Any = 
parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
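A hypothetical direct invocation of the converter, matching the positional call in the `__main__` block above; the file names are placeholders, not real checkpoints:

convert_flava_checkpoint(
    "flava_full.ckpt",      # original FLAVA checkpoint (local path or URL)
    "flava_codebook.ckpt",  # DALL-E codebook checkpoint
    "flava-hf",             # output folder for the HF-format model
    None,                   # fall back to the default FlavaConfig
)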
25
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
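A minimal sketch of how the `attribute_map` above behaves: the GPT-2-style names and the canonical transformers names refer to the same values.

config = TrajectoryTransformerConfig(n_embd=256, n_head=8)
assert config.hidden_size == config.n_embd == 256        # attribute_map redirects the lookup
assert config.num_attention_heads == config.n_head == 8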
25
1
def binary_count_setbits(a: int) -> int:
    """Return the number of set bits (1s) in the binary representation of `a`."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
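Quick checks of the popcount above; the binary expansions in the comments are exact:

assert binary_count_setbits(25) == 3  # bin(25) == '0b11001'
assert binary_count_setbits(37) == 3  # bin(37) == '0b100101'
assert binary_count_setbits(0) == 0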
205
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
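A hypothetical direct call to the converter above (normally the argparse entry point is used); both paths are invented for illustration:

convert_xlm_checkpoint_to_pytorch(
    "mlm_en_2048.pth",  # placeholder for an official XLM dump
    "xlm-hf",           # output folder for pytorch_model.bin / config.json / vocab.json
)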
146
0
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A_ ( self ): _lowerCamelCase : Dict = 1 _lowerCamelCase : Optional[int] = 3 _lowerCamelCase : str = (32, 32) _lowerCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) return model @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def A_ ( self ): torch.manual_seed(0 ) _lowerCamelCase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(lowercase ) @property def A_ ( self ): def extract(*lowercase , **lowercase ): class lowerCAmelCase__ : '''simple docstring''' def __init__( self ): _lowerCamelCase : Any = torch.ones([0] ) def A_ ( self , lowercase ): self.pixel_values.to(lowercase ) return self return Out() return extract def A_ ( self ): _lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : int = self.dummy_cond_unet _lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , ) _lowerCamelCase : List[Any] = self.dummy_vae _lowerCamelCase : int = self.dummy_text_encoder _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _lowerCamelCase : str = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) _lowerCamelCase : Dict = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : List[str] = 'A painting of a squirrel eating a burger' _lowerCamelCase : str = torch.Generator(device=lowercase ).manual_seed(0 ) _lowerCamelCase : Dict = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _lowerCamelCase : List[str] = output.images _lowerCamelCase : Optional[Any] = torch.Generator(device=lowercase ).manual_seed(0 ) _lowerCamelCase : Union[str, Any] = 
sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowercase , )[0] _lowerCamelCase : str = image[0, -3:, -3:, -1] _lowerCamelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCamelCase : str = np.array([0.57_56, 0.61_18, 0.50_05, 0.50_41, 0.54_71, 0.47_26, 0.49_76, 0.48_65, 0.48_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator _lowerCamelCase : List[Any] = self.dummy_cond_unet _lowerCamelCase : Tuple = PNDMScheduler(skip_prk_steps=lowercase ) _lowerCamelCase : List[str] = self.dummy_vae _lowerCamelCase : List[str] = self.dummy_text_encoder _lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # make sure here that pndm scheduler skips prk _lowerCamelCase : str = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) _lowerCamelCase : Optional[int] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = 'A painting of a squirrel eating a burger' _lowerCamelCase : int = torch.Generator(device=lowercase ).manual_seed(0 ) _lowerCamelCase : List[Any] = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' ) _lowerCamelCase : Union[str, Any] = output.images _lowerCamelCase : str = torch.Generator(device=lowercase ).manual_seed(0 ) _lowerCamelCase : Any = sd_pipe( [prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowercase , )[0] _lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] _lowerCamelCase : List[str] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCamelCase : str = np.array([0.51_25, 0.57_16, 0.48_28, 0.50_60, 0.56_50, 0.47_68, 0.51_85, 0.48_95, 0.49_93] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained( 'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowercase ) assert isinstance(lowercase , lowercase ) assert isinstance(pipe.scheduler , lowercase ) assert pipe.safety_checker is None _lowerCamelCase : Any = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(lowercase ) _lowerCamelCase : Dict = StableDiffusionPipeline.from_pretrained(lowercase ) # sanity check that the pipeline still works assert pipe.safety_checker is None _lowerCamelCase : Tuple = pipe('example prompt' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def A_ ( self ): _lowerCamelCase : int = self.dummy_cond_unet _lowerCamelCase : Optional[int] = PNDMScheduler(skip_prk_steps=lowercase ) _lowerCamelCase : Any = self.dummy_vae _lowerCamelCase : Union[str, Any] = self.dummy_text_encoder _lowerCamelCase : Tuple = 
CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) # put models in fp16 _lowerCamelCase : Optional[Any] = unet.half() _lowerCamelCase : Any = vae.half() _lowerCamelCase : Tuple = bert.half() # make sure here that pndm scheduler skips prk _lowerCamelCase : Tuple = StableDiffusionPipeline( unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , ) _lowerCamelCase : Optional[Any] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Any = 'A painting of a squirrel eating a burger' _lowerCamelCase : List[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self ): _lowerCamelCase : Dict = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowercase ) _lowerCamelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _lowerCamelCase : Any = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Tuple = ( 'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle' ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with' ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and' ' children from bahnhof zoo, detailed ' ) _lowerCamelCase : Dict = 4003660346 _lowerCamelCase : str = 7 # without safety guidance (sld_guidance_scale = 0) _lowerCamelCase : Dict = torch.manual_seed(lowercase ) _lowerCamelCase : List[Any] = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCamelCase : int = output.images _lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] _lowerCamelCase : List[str] = [0.22_78, 0.22_31, 0.22_49, 0.23_33, 0.23_03, 0.18_85, 0.22_73, 0.21_44, 0.21_76] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) _lowerCamelCase : str = torch.manual_seed(lowercase ) _lowerCamelCase : Optional[int] = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCamelCase : Dict = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] _lowerCamelCase : List[str] = [0.23_83, 0.22_76, 0.2_36, 0.21_92, 0.21_86, 0.20_53, 0.19_71, 0.19_01, 0.17_19] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : List[Any] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowercase ) _lowerCamelCase : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _lowerCamelCase : Dict = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : int = 'padme amidala taking a bath artwork, safe for work, no nudity' _lowerCamelCase : Tuple = 2734971755 
_lowerCamelCase : Tuple = 7 _lowerCamelCase : Optional[int] = torch.manual_seed(lowercase ) _lowerCamelCase : Tuple = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCamelCase : int = output.images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] _lowerCamelCase : Any = [0.35_02, 0.36_22, 0.33_96, 0.36_42, 0.34_78, 0.33_18, 0.35, 0.33_48, 0.32_97] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 _lowerCamelCase : Tuple = torch.manual_seed(lowercase ) _lowerCamelCase : Any = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCamelCase : Union[str, Any] = output.images _lowerCamelCase : Dict = image[0, -3:, -3:, -1] _lowerCamelCase : Optional[int] = [0.55_31, 0.52_06, 0.48_95, 0.51_56, 0.51_82, 0.47_51, 0.48_02, 0.48_03, 0.44_43] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A_ ( self ): _lowerCamelCase : Tuple = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' ) _lowerCamelCase : Union[str, Any] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) _lowerCamelCase : Optional[Any] = ( 'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.' ' leyendecker' ) _lowerCamelCase : int = 1044355234 _lowerCamelCase : Any = 12 _lowerCamelCase : List[Any] = torch.manual_seed(lowercase ) _lowerCamelCase : Tuple = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , ) _lowerCamelCase : Union[str, Any] = output.images _lowerCamelCase : List[str] = image[0, -3:, -3:, -1] _lowerCamelCase : Union[str, Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 _lowerCamelCase : Union[str, Any] = torch.manual_seed(lowercase ) _lowerCamelCase : Dict = sd_pipe( [prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_25 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _lowerCamelCase : str = output.images _lowerCamelCase : List[Any] = image[0, -3:, -3:, -1] _lowerCamelCase : int = np.array([0.58_18, 0.62_85, 0.68_35, 0.60_19, 0.6_25, 0.67_54, 0.60_96, 0.63_34, 0.65_61] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
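The pattern these tests exercise, reduced to a hedged usage sketch of the safe pipeline; the prompt is illustrative and the `sld_*` values mirror the strong configuration used in the tests above:

from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe(
    "a prompt to render",
    sld_guidance_scale=2000,  # 0 disables safe latent diffusion entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]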
12
"""simple docstring""" from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ): if attention_mask is None: _lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = OPTConfig lowerCamelCase__ = {} lowerCamelCase__ = """gelu""" def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ): _lowerCamelCase : Tuple = parent _lowerCamelCase : Any = batch_size _lowerCamelCase : Tuple = seq_length _lowerCamelCase : str = is_training _lowerCamelCase : Optional[int] = use_labels _lowerCamelCase : List[Any] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : str = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Any = intermediate_size _lowerCamelCase : Dict = hidden_act _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : List[str] = attention_probs_dropout_prob _lowerCamelCase : Optional[Any] = max_position_embeddings _lowerCamelCase : List[Any] = eos_token_id _lowerCamelCase : Tuple = pad_token_id _lowerCamelCase : List[str] = bos_token_id _lowerCamelCase : Optional[int] = embed_dim _lowerCamelCase : List[str] = word_embed_proj_dim _lowerCamelCase : Any = False def A_ ( self ): _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) _lowerCamelCase : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) _lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def A_ ( self , lowercase , lowercase ): _lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase ) _lowerCamelCase : Optional[Any] = inputs_dict['input_ids'] _lowerCamelCase : str = input_ids[:1, :] _lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :] _lowerCamelCase : Optional[Any] = 1 # first forward pass _lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) _lowerCamelCase, _lowerCamelCase : List[str] 
= outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) _lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0] _lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] _lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase__ = ( {"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = False lowerCamelCase__ = 10 def A_ ( self ): _lowerCamelCase : int = TFOPTModelTester(self ) _lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase ) def A_ ( self ): self.config_tester.run_common_tests() def A_ ( self ): _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def A_ ( self ): _lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings _lowerCamelCase : Optional[int] = model_class(config=lowercase ) _lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) _lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) _lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing _lowerCamelCase : int = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Optional[Any] = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) _lowerCamelCase : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: _lowerCamelCase : Union[str, Any] = False self.assertTrue(lowercase ) def _snake_case ( lowercase__ ): return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = 99 def A_ ( self ): _lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2 _lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) _lowerCamelCase : int = input_ids.shape[0] _lowerCamelCase : List[Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @slow def A_ ( self ): _lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' ) _lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) _lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): _lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state _lowerCamelCase : Optional[Any] = (1, 11, 512) self.assertEqual(output.shape , lowercase ) _lowerCamelCase : List[str] = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) _lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def A_ ( self ): super().setUp() _lowerCamelCase : List[Any] = 'facebook/opt-350m' def A_ ( self ): _lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model ) _lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model ) _lowerCamelCase : List[str] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) _lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) _lowerCamelCase : Any = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, 
-10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) _lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase ) _lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' @property def A_ ( self ): return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self ): _lowerCamelCase : str = 'facebook/opt-125m' _lowerCamelCase : Dict = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : int = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase ) def A_ ( self ): _lowerCamelCase : List[Any] = 'facebook/opt-350m' _lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase ) _lowerCamelCase : Any = 'left' # use different length sentences to test batching _lowerCamelCase : Optional[int] = [ 'Hello, my dog is a little', 'Today, I', ] _lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) _lowerCamelCase : int = inputs['input_ids'] _lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) _lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase ) _lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) _lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids _lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) _lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) _lowerCamelCase : Optional[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def A_ ( self ): _lowerCamelCase : Tuple = 'facebook/opt-350m' _lowerCamelCase : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the 
city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase ) _lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: _lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids _lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 ) _lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
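The embedding-resize check near the top of this sample is garbled by the name mangling: both loop variables in the zip are called `pa`, so `pa - pa` is identically zero and the test can never fail. A minimal sketch of the intended comparison, assuming two TensorFlow embedding variables (the helper name is illustrative, not from the sample):

import tensorflow as tf

def embeddings_preserved(old_emb, new_emb) -> bool:
    # After resizing, the rows shared by both matrices should be bit-identical.
    n = min(int(old_emb.shape[0]), int(new_emb.shape[0]))
    diff = tf.math.reduce_sum(tf.math.abs(old_emb.value()[:n] - new_emb.value()[:n]))
    return bool(diff == 0)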
12
1
"""simple docstring""" def lowercase ( A_ )-> bool: '''simple docstring''' return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
40
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly; the base case of the recursion."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sided square matrix into four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven recursive products (CLRS labeling)
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    # work on padded copies so the callers' matrices are not mutated
    new_matrix1 = [row[:] for row in matrix1]
    new_matrix2 = [row[:] for row in matrix2]

    # Pad with zeros so that both arrays are square with a power-of-2 side
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Remove the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
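For reference, the same recursion in the textbook labeling of Strassen's seven products (M1..M7), restricted to square matrices whose side is a power of two; the padding logic above reduces the general case to this one. Names here are illustrative, not from the sample:

def strassen_square(a: list, b: list) -> list:
    """Strassen multiplication for n x n matrices where n is a power of two."""
    n = len(a)
    if n == 1:
        return [[a[0][0] * b[0][0]]]
    h = n // 2

    def add(x, y):
        return [[p + q for p, q in zip(r, s)] for r, s in zip(x, y)]

    def sub(x, y):
        return [[p - q for p, q in zip(r, s)] for r, s in zip(x, y)]

    # split both operands into quadrants
    a11 = [row[:h] for row in a[:h]]; a12 = [row[h:] for row in a[:h]]
    a21 = [row[:h] for row in a[h:]]; a22 = [row[h:] for row in a[h:]]
    b11 = [row[:h] for row in b[:h]]; b12 = [row[h:] for row in b[:h]]
    b21 = [row[:h] for row in b[h:]]; b22 = [row[h:] for row in b[h:]]

    m1 = strassen_square(add(a11, a22), add(b11, b22))
    m2 = strassen_square(add(a21, a22), b11)
    m3 = strassen_square(a11, sub(b12, b22))
    m4 = strassen_square(a22, sub(b21, b11))
    m5 = strassen_square(add(a11, a12), b22)
    m6 = strassen_square(sub(a21, a11), add(b11, b12))
    m7 = strassen_square(sub(a12, a22), add(b21, b22))

    c11 = add(sub(add(m1, m4), m5), m7)
    c12 = add(m3, m5)
    c21 = add(m2, m4)
    c22 = add(sub(add(m1, m3), m2), m6)
    return [c11[i] + c12[i] for i in range(h)] + [c21[i] + c22[i] for i in range(h)]

# strassen_square([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]]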
73
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ ,unittest.TestCase ): _UpperCAmelCase : List[Any] = StableDiffusionInstructPixaPixPipeline _UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} _UpperCAmelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS _UpperCAmelCase : Tuple = IMAGE_TO_IMAGE_IMAGE_PARAMS _UpperCAmelCase : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowerCamelCase ( self : str ) ->Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase__ : List[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) lowerCamelCase__ : int = PNDMScheduler(skip_prk_steps=A ) torch.manual_seed(0 ) lowerCamelCase__ : str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase__ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCamelCase__ : List[Any] = CLIPTextModel(A ) lowerCamelCase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase__ : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCamelCase ( self : Dict , A : Any , A : Tuple=0 ) ->List[Any]: lowerCamelCase__ : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) lowerCamelCase__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase__ : List[str] = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ) if str(A ).startswith('''mps''' ): lowerCamelCase__ : Any = torch.manual_seed(A ) else: lowerCamelCase__ : int = torch.Generator(device=A ).manual_seed(A ) lowerCamelCase__ : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self : List[Any] ) ->Dict: lowerCamelCase__ : List[Any] = '''cpu''' # ensure determinism for the 
device-dependent torch.Generator lowerCamelCase__ : Any = self.get_dummy_components() lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline(**A ) lowerCamelCase__ : List[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) lowerCamelCase__ : Optional[int] = self.get_dummy_inputs(A ) lowerCamelCase__ : Any = sd_pipe(**A ).images lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCamelCase__ : List[str] = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCamelCase ( self : Optional[int] ) ->List[str]: lowerCamelCase__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : Any = self.get_dummy_components() lowerCamelCase__ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**A ) lowerCamelCase__ : str = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) lowerCamelCase__ : Tuple = self.get_dummy_inputs(A ) lowerCamelCase__ : Dict = '''french fries''' lowerCamelCase__ : Dict = sd_pipe(**A , negative_prompt=A ) lowerCamelCase__ : Dict = output.images lowerCamelCase__ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) lowerCamelCase__ : Dict = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCamelCase ( self : Any ) ->int: lowerCamelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : Optional[Any] = self.get_dummy_components() lowerCamelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**A ) lowerCamelCase__ : List[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) lowerCamelCase__ : Optional[int] = self.get_dummy_inputs(A ) lowerCamelCase__ : Dict = [inputs['''prompt''']] * 2 lowerCamelCase__ : int = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_55.0 lowerCamelCase__ : str = torch.from_numpy(A ).unsqueeze(0 ).to(A ) lowerCamelCase__ : List[Any] = image / 2 + 0.5 lowerCamelCase__ : int = image.permute(0 , 3 , 1 , 2 ) lowerCamelCase__ : str = image.repeat(2 , 1 , 1 , 1 ) lowerCamelCase__ : Optional[Any] = sd_pipe(**A ).images lowerCamelCase__ : List[Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) lowerCamelCase__ : List[str] = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCamelCase ( self : Optional[int] ) ->List[str]: lowerCamelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCamelCase__ : List[str] = self.get_dummy_components() lowerCamelCase__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' ) lowerCamelCase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**A ) lowerCamelCase__ : Any = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) lowerCamelCase__ : List[Any] = self.get_dummy_inputs(A ) lowerCamelCase__ : int = sd_pipe(**A ).images lowerCamelCase__ : Optional[Any] = image[0, -3:, -3:, -1] lowerCamelCase__ : List[str] = [round(A , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(A ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) lowerCamelCase__ : List[Any] = np.array([0.74_17, 
0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowerCamelCase ( self : Optional[int] ) ->Dict: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowerCamelCase ( self : Dict ) ->Optional[int]: lowerCamelCase__ : Any = self.get_dummy_components() lowerCamelCase__ : str = StableDiffusionInstructPixaPixPipeline(**A ) lowerCamelCase__ : Dict = VaeImageProcessor(do_resize=A , do_normalize=A ) lowerCamelCase__ : Tuple = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) lowerCamelCase__ : int = pipe(**self.get_dummy_inputs_by_type(A , input_image_type='''pt''' ) )[0] lowerCamelCase__ : int = components['''vae'''] lowerCamelCase__ : Any = self.get_dummy_inputs_by_type(A , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCamelCase__ : List[Any] = vae.encode(inputs[image_param] ).latent_dist.mode() lowerCamelCase__ : Union[str, Any] = pipe(**A )[0] lowerCamelCase__ : Any = np.abs(out - out_latents_inputs ).max() self.assertLess(A , 1e-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self : List[Any] ) ->Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self : str , A : Optional[int]=0 ) ->Dict: lowerCamelCase__ : Tuple = torch.manual_seed(A ) lowerCamelCase__ : Tuple = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) lowerCamelCase__ : int = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self : Dict ) ->int: lowerCamelCase__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase__ : Tuple = self.get_inputs() lowerCamelCase__ : int = pipe(**A ).images lowerCamelCase__ : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ : Tuple = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowerCamelCase ( self : List[str] ) ->List[Any]: lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=A ) lowerCamelCase__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase__ : List[str] = self.get_inputs() lowerCamelCase__ : int = pipe(**A ).images lowerCamelCase__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ : List[str] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]: lowerCamelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=A ) lowerCamelCase__ : Any = 
DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase__ : Tuple = self.get_inputs() lowerCamelCase__ : Any = pipe(**A ).images lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCamelCase__ : Optional[Any] = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowerCamelCase ( self : List[Any] ) ->List[Any]: lowerCamelCase__ : str = 0 def callback_fn(A : int , A : int , A : torch.FloatTensor ) -> None: lowerCamelCase__ : Optional[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCamelCase__ : int = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) lowerCamelCase__ : Any = latents[0, -3:, -3:, -1] lowerCamelCase__ : Tuple = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowerCamelCase__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) lowerCamelCase__ : List[str] = latents[0, -3:, -3:, -1] lowerCamelCase__ : int = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa ) lowerCamelCase__ : int = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase__ : Optional[int] = self.get_inputs() pipe(**A , callback=A , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowerCamelCase ( self : Any ) ->List[str]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=A , torch_dtype=torch.floataa ) lowerCamelCase__ : List[str] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase__ : Tuple = self.get_inputs() lowerCamelCase__ : List[Any] = pipe(**A ) lowerCamelCase__ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def __lowerCamelCase ( self : str ) ->Optional[int]: lowerCamelCase__ : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase__ : Any = inputs['''image'''].resize((5_0_4, 5_0_4) ) lowerCamelCase__ : Dict = '''timbrooks/instruct-pix2pix''' lowerCamelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( A , safety_checker=A , ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) pipe.enable_attention_slicing() lowerCamelCase__ : List[str] = pipe(**A ) lowerCamelCase__ : List[str] = output.images[0] lowerCamelCase__ : Tuple = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) lowerCamelCase__ : str = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
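Stripped of the name mangling (`StableDiffusionInstructPixaPixPipeline` is `StableDiffusionInstructPix2PixPipeline`), the inference flow these integration tests exercise looks roughly like the sketch below; it assumes a CUDA device and the same checkpoint and test image the tests load:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
result = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=10,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]
result.save("cyborg.png")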
265
def is_palindrome(n: int) -> bool:
    """Return True if n reads the same forwards and backwards."""
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    """Return n plus the number formed by reversing its digits."""
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Project Euler 55: count how many numbers below `limit` are Lychrel
    candidates, i.e. never produce a palindrome within 50 reverse-and-add
    iterations."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # 50 iterations without a palindrome: assume num is Lychrel
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
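A quick worked check of the reverse-and-add iteration, using the helpers above: 349 reaches a palindrome in three steps (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), so it is not a Lychrel candidate.

assert not is_palindrome(349)
assert is_palindrome(sum_reverse(sum_reverse(sum_reverse(349))))  # 7337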
265
1
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert `value` between any two units listed in ENERGY_CONVERSION.

    >>> energy_conversion("kilowatthour", "joule", 1.0)
    3600000.0
    """
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
151
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = list(model.children() )[:-2] __UpperCAmelCase : Tuple = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : Tuple = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : int = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : str = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Union[str, Any] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : str , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Optional[Any] = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer __UpperCAmelCase : List[str] = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = max_seq_length __UpperCAmelCase : Tuple = transforms def __len__( self : Tuple ): '''simple docstring''' return len(self.data ) def __getitem__( self : int , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Union[str, Any] = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : List[str] = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Any = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : int = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : int = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase 
: Optional[int] = torch.stack([row["""label"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> List[str]: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Dict: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
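The `collate_fn` in this sample pads variable-length token sequences into one batch tensor and builds the matching mask; a self-contained sketch of that step (the function name is illustrative):

import torch

def pad_batch(sequences: list) -> tuple:
    """Pad 1-D LongTensors to a common length; return (batch, mask)."""
    lengths = [s.size(0) for s in sequences]
    batch = torch.zeros(len(sequences), max(lengths), dtype=torch.long)
    mask = torch.zeros(len(sequences), max(lengths), dtype=torch.long)
    for i, (seq, n) in enumerate(zip(sequences, lengths)):
        batch[i, :n] = seq
        mask[i, :n] = 1
    return batch, mask

# pad_batch([torch.tensor([1, 2, 3]), torch.tensor([4])]) -> two tensors of shape (2, 3)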
115
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _snake_case = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
363
"""simple docstring""" from math import factorial def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' if successes > trials: raise ValueError("""successes must be lower or equal to trials""" ) if trials < 0 or successes < 0: raise ValueError("""the function is defined for non-negative integers""" ) if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError("""the function is defined for non-negative integers""" ) if not 0 < prob < 1: raise ValueError("""prob has to be in range of 1 - 0""" ) _a : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! _a : Optional[int] = float(factorial(UpperCamelCase__ ) ) coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('Probability of 2 successes out of 4 trails') print('with probability of 0.75 is:', end=' ') print(binomial_distribution(2, 4, 0.75))
324
0
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa lowerCAmelCase_ = logging.getLogger(__name__) class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "summarization" SCREAMING_SNAKE_CASE : Union[str, Any] = ["loss"] SCREAMING_SNAKE_CASE : List[Any] = ROUGE_KEYS SCREAMING_SNAKE_CASE : List[Any] = "rouge2" def __init__( self : int , _UpperCamelCase : Any , **_UpperCamelCase : List[Any] ) ->List[Any]: if hparams.sortish_sampler and hparams.gpus > 1: snake_case_ = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(_UpperCamelCase , num_labels=_UpperCamelCase , mode=self.mode , **_UpperCamelCase ) use_task_specific_params(self.model , '''summarization''' ) save_git_info(self.hparams.output_dir ) snake_case_ = Path(self.output_dir ) / '''metrics.json''' snake_case_ = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams , self.hparams_save_path ) snake_case_ = 0 snake_case_ = defaultdict(_UpperCamelCase ) snake_case_ = self.config.model_type snake_case_ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size snake_case_ = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } snake_case_ = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } snake_case_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} snake_case_ = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) snake_case_ = get_git_info()['''repo_sha'''] snake_case_ = hparams.num_workers snake_case_ = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCamelCase ): snake_case_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang] snake_case_ = self.decoder_start_token_id snake_case_ = ( 
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset ) snake_case_ = False snake_case_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: snake_case_ = self.hparams.eval_max_gen_length else: snake_case_ = self.model.config.max_length snake_case_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def snake_case__( self : List[Any] , _UpperCamelCase : Dict[str, torch.Tensor] ) ->Dict[str, List[str]]: snake_case_ = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(_UpperCamelCase , Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' ) snake_case_ = True return readable_batch def snake_case__( self : Tuple , _UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[str] ) ->List[Any]: return self.model(_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : int , _UpperCamelCase : List[int] ) ->List[Any]: snake_case_ = self.tokenizer.batch_decode( _UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return lmap(str.strip , _UpperCamelCase ) def snake_case__( self : List[Any] , _UpperCamelCase : dict ) ->Tuple: snake_case_ = self.tokenizer.pad_token_id snake_case_, snake_case_ = batch['''input_ids'''], batch['''attention_mask'''] snake_case_ = batch['''labels'''] if isinstance(self.model , _UpperCamelCase ): snake_case_ = self.model._shift_right(_UpperCamelCase ) else: snake_case_ = shift_tokens_right(_UpperCamelCase , _UpperCamelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero snake_case_ = decoder_input_ids self.save_readable_batch(_UpperCamelCase ) snake_case_ = self(_UpperCamelCase , attention_mask=_UpperCamelCase , decoder_input_ids=_UpperCamelCase , use_cache=_UpperCamelCase ) snake_case_ = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id snake_case_ = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase ) assert lm_logits.shape[-1] == self.vocab_size snake_case_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: snake_case_ = nn.functional.log_softmax(_UpperCamelCase , dim=-1 ) snake_case_, snake_case_ = label_smoothed_nll_loss( _UpperCamelCase , _UpperCamelCase , self.hparams.label_smoothing , ignore_index=_UpperCamelCase ) return (loss,) @property def snake_case__( self : Tuple ) ->int: return self.tokenizer.pad_token_id def snake_case__( self : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : str ) ->Dict: snake_case_ = self._step(_UpperCamelCase ) snake_case_ = dict(zip(self.loss_names , _UpperCamelCase ) ) # tokens per batch snake_case_ = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() snake_case_ = batch['''input_ids'''].shape[0] snake_case_ = batch['''input_ids'''].eq(self.pad ).sum() snake_case_ = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def snake_case__( self : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) ->Dict: return self._generative_step(_UpperCamelCase ) def snake_case__( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : 
Optional[Any]="val" ) ->Dict: self.step_count += 1 snake_case_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} snake_case_ = losses['''loss'''] snake_case_ = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } snake_case_ = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) snake_case_ = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(_UpperCamelCase ) snake_case_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()} snake_case_ = self.step_count self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path snake_case_ = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'''{prefix}_loss''': loss, f'''{prefix}_{self.val_metric}''': metric_tensor, } def snake_case__( self : int , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) ->Dict: return calculate_rouge(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : str , _UpperCamelCase : dict ) ->dict: snake_case_ = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') snake_case_ = self.model.generate( batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) snake_case_ = (time.time() - ta) / batch['''input_ids'''].shape[0] snake_case_ = self.ids_to_clean_text(_UpperCamelCase ) snake_case_ = self.ids_to_clean_text(batch['''labels'''] ) snake_case_ = self._step(_UpperCamelCase ) snake_case_ = dict(zip(self.loss_names , _UpperCamelCase ) ) snake_case_ = self.calc_generative_metrics(_UpperCamelCase , _UpperCamelCase ) snake_case_ = np.mean(lmap(_UpperCamelCase , _UpperCamelCase ) ) base_metrics.update(gen_time=_UpperCamelCase , gen_len=_UpperCamelCase , preds=_UpperCamelCase , target=_UpperCamelCase , **_UpperCamelCase ) return base_metrics def snake_case__( self : Tuple , _UpperCamelCase : str , _UpperCamelCase : Dict ) ->Optional[int]: return self._generative_step(_UpperCamelCase ) def snake_case__( self : Optional[int] , _UpperCamelCase : Optional[int] ) ->List[Any]: return self.validation_epoch_end(_UpperCamelCase , prefix='''test''' ) def snake_case__( self : Union[str, Any] , _UpperCamelCase : Any ) ->SeqaSeqDataset: snake_case_ = self.n_obs[type_path] snake_case_ = self.target_lens[type_path] snake_case_ = self.dataset_class( self.tokenizer , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , max_target_length=_UpperCamelCase , **self.dataset_kwargs , ) return dataset def snake_case__( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : bool = False ) ->DataLoader: snake_case_ = self.get_dataset(_UpperCamelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": snake_case_ = dataset.make_sortish_sampler(_UpperCamelCase , distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": snake_case_ = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , 
distributed=self.hparams.gpus > 1 ) return DataLoader( _UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( _UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , ) def snake_case__( self : List[str] ) ->DataLoader: snake_case_ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCamelCase ) return dataloader def snake_case__( self : Union[str, Any] ) ->DataLoader: return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size ) def snake_case__( self : Tuple ) ->DataLoader: return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size ) @staticmethod def snake_case__( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) ->Tuple: BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase ) add_generic_args(_UpperCamelCase , _UpperCamelCase ) parser.add_argument( '''--max_source_length''' , default=1_0_2_4 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--max_target_length''' , default=5_6 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--val_max_target_length''' , default=1_4_2 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--test_max_target_length''' , default=1_4_2 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument('''--freeze_encoder''' , action='''store_true''' ) parser.add_argument('''--freeze_embeds''' , action='''store_true''' ) parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_UpperCamelCase ) parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_UpperCamelCase ) parser.add_argument('''--max_tokens_per_batch''' , type=_UpperCamelCase , default=_UpperCamelCase ) parser.add_argument('''--logger_name''' , type=_UpperCamelCase , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' ) parser.add_argument('''--n_train''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' , type=_UpperCamelCase , default=5_0_0 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' , type=_UpperCamelCase , default='''summarization''' , required=_UpperCamelCase , help='''# examples. 
-1 means use all.''' ) parser.add_argument('''--label_smoothing''' , type=_UpperCamelCase , default=0.0 , required=_UpperCamelCase ) parser.add_argument('''--src_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase ) parser.add_argument('''--tgt_lang''' , type=_UpperCamelCase , default='''''' , required=_UpperCamelCase ) parser.add_argument('''--eval_beams''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase ) parser.add_argument( '''--val_metric''' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' , type=_UpperCamelCase , default=_UpperCamelCase , help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) , ) return parser class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = "translation" SCREAMING_SNAKE_CASE : Dict = ["loss"] SCREAMING_SNAKE_CASE : str = ["bleu"] SCREAMING_SNAKE_CASE : List[str] = "bleu" def __init__( self : Optional[Any] , _UpperCamelCase : Any , **_UpperCamelCase : Any ) ->Optional[int]: super().__init__(_UpperCamelCase , **_UpperCamelCase ) snake_case_ = hparams.src_lang snake_case_ = hparams.tgt_lang def snake_case__( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) ->dict: return calculate_bleu(_UpperCamelCase , _UpperCamelCase ) def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) check_output_dir(SCREAMING_SNAKE_CASE__ , expected_items=3 ) if model is None: if "summarization" in args.task: snake_case_ = SummarizationModule(SCREAMING_SNAKE_CASE__ ) else: snake_case_ = TranslationModule(SCREAMING_SNAKE_CASE__ ) snake_case_ = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): snake_case_ = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger snake_case_ = os.environ.get('''WANDB_PROJECT''' , SCREAMING_SNAKE_CASE__ ) snake_case_ = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE__ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger snake_case_ = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: snake_case_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: snake_case_ = False snake_case_ = args.val_metric == '''loss''' snake_case_ = generic_train( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE__ ) , early_stopping_callback=SCREAMING_SNAKE_CASE__ , logger=SCREAMING_SNAKE_CASE__ , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model snake_case_ = '''''' 
snake_case_ = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=SCREAMING_SNAKE_CASE__ ) ) if checkpoints: snake_case_ = checkpoints[-1] snake_case_ = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() lowerCAmelCase_ = pl.Trainer.add_argparse_args(parser) lowerCAmelCase_ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) lowerCAmelCase_ = parser.parse_args() main(args)
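The training step in this script dispatches to `label_smoothed_nll_loss` when `--label_smoothing > 0`. A sketch of that loss, assuming `lprobs` of shape (batch, seq, vocab) holding log-probabilities and a non-negative pad-token id as `ignore_index` (the script passes `self.pad`):

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index):
    # lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) token ids
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # mass spread uniformly over the vocab
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss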
8
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : int = logging.get_logger(__name__) A__ : List[str] = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[Any] = "pix2struct_text_model" _UpperCAmelCase :str = ["past_key_values"] _UpperCAmelCase :str = { "hidden_size": "hidden_size", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : Dict , snake_case__ : Any=5_0244 , snake_case__ : Optional[int]=768 , snake_case__ : Dict=64 , snake_case__ : List[str]=2048 , snake_case__ : Dict=12 , snake_case__ : Any=12 , snake_case__ : Dict=32 , snake_case__ : int=128 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1E-6 , snake_case__ : Any=1.0 , snake_case__ : int="gelu_new" , snake_case__ : Optional[Any]=0 , snake_case__ : Any=False , snake_case__ : Any=0 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=True , **snake_case__ : Any , ): lowerCamelCase_ : List[str] =vocab_size lowerCamelCase_ : Tuple =hidden_size lowerCamelCase_ : Optional[int] =d_kv lowerCamelCase_ : List[Any] =d_ff lowerCamelCase_ : Tuple =num_layers lowerCamelCase_ : Optional[int] =num_heads lowerCamelCase_ : Any =relative_attention_num_buckets lowerCamelCase_ : Optional[int] =relative_attention_max_distance lowerCamelCase_ : List[Any] =dropout_rate lowerCamelCase_ : str =layer_norm_epsilon lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : str =use_cache lowerCamelCase_ : int =eos_token_id lowerCamelCase_ : Optional[Any] =decoder_start_token_id # for backwards compatibility lowerCamelCase_ : Optional[Any] =dense_act_fn super().__init__( pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , ) @classmethod def UpperCAmelCase__ ( cls : Tuple , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ): cls._set_token_in_kwargs(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Any =cls.get_config_dict(snake_case__ , **snake_case__ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowerCamelCase_ : List[Any] =config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowercase__ ( snake_case__ ): _UpperCAmelCase :List[Any] = "pix2struct_vision_model" def __init__( self : Optional[int] , snake_case__ : Tuple=768 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=2048 , snake_case__ : Tuple=64 , snake_case__ : List[Any]=12 , snake_case__ : Dict=12 , snake_case__ : int="gelu_new" , snake_case__ : str=1E-6 , snake_case__ : int=0.0 , snake_case__ : int=0.0 , snake_case__ : Dict=1E-10 , snake_case__ : Tuple=1.0 , snake_case__ : int=4096 , snake_case__ : Tuple=32 , snake_case__ : List[str]=128 , **snake_case__ : List[Any] , ): super().__init__(**snake_case__ ) lowerCamelCase_ : int =hidden_size lowerCamelCase_ : List[Any] =patch_embed_hidden_size lowerCamelCase_ : Tuple =d_ff lowerCamelCase_ : List[Any] =dropout_rate lowerCamelCase_ : Dict =num_hidden_layers lowerCamelCase_ : List[str] =num_attention_heads lowerCamelCase_ : Optional[Any] =initializer_range lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : Any =attention_dropout lowerCamelCase_ : List[str] =layer_norm_eps lowerCamelCase_ : int =dense_act_fn lowerCamelCase_ : Optional[Any] =seq_len lowerCamelCase_ : Optional[int] =relative_attention_num_buckets lowerCamelCase_ : Optional[int] =relative_attention_max_distance lowerCamelCase_ : Dict =d_kv @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ): cls._set_token_in_kwargs(snake_case__ ) lowerCamelCase_ , lowerCamelCase_ : Dict =cls.get_config_dict(snake_case__ , **snake_case__ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("model_type" ) == "pix2struct": lowerCamelCase_ : List[Any] =config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(snake_case__ , **snake_case__ ) class lowercase__ ( snake_case__ ): _UpperCAmelCase :str = "pix2struct" _UpperCAmelCase :List[str] = True def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=False , snake_case__ : int=False , snake_case__ : Any=True , **snake_case__ : List[Any] , ): super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ ) if text_config is None: lowerCamelCase_ : Dict ={} logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." ) if vision_config is None: lowerCamelCase_ : int ={} logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." 
) lowerCamelCase_ : Any =PixaStructTextConfig(**snake_case__ ) lowerCamelCase_ : Optional[Any] =PixaStructVisionConfig(**snake_case__ ) lowerCamelCase_ : str =self.text_config.decoder_start_token_id lowerCamelCase_ : Optional[int] =self.text_config.pad_token_id lowerCamelCase_ : List[Any] =self.text_config.eos_token_id lowerCamelCase_ : int =initializer_factor lowerCamelCase_ : Optional[Any] =initializer_range lowerCamelCase_ : Any =self.initializer_range lowerCamelCase_ : List[Any] =self.initializer_range lowerCamelCase_ : List[str] =is_vqa @classmethod def UpperCAmelCase__ ( cls : List[str] , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[int] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ ) def UpperCAmelCase__ ( self : Any ): lowerCamelCase_ : Tuple =copy.deepcopy(self.__dict__ ) lowerCamelCase_ : Dict =self.text_config.to_dict() lowerCamelCase_ : Union[str, Any] =self.vision_config.to_dict() lowerCamelCase_ : Dict =self.__class__.model_type return output
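With the names de-mangled (`PixaStruct...` is `Pix2Struct...`), the composite config can be assembled from its parts via the `from_text_vision_configs` classmethod defined above; a short sketch, assuming the public transformers classes are importable from the top level:

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(config.text_config.num_layers, config.vision_config.num_hidden_layers)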
144
0
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline _lowerCAmelCase : List[str] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def UpperCamelCase_( _snake_case : Optional[int] , _snake_case : tuple , _snake_case : Path , _snake_case : Any , _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : int=False , ): """simple docstring""" output_path.parent.mkdir(parents=_snake_case , exist_ok=_snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , use_external_data_format=_snake_case , enable_onnx_checker=_snake_case , opset_version=_snake_case , ) else: export( _snake_case , _snake_case , f=output_path.as_posix() , input_names=_snake_case , output_names=_snake_case , dynamic_axes=_snake_case , do_constant_folding=_snake_case , opset_version=_snake_case , ) @torch.no_grad() def UpperCamelCase_( _snake_case : str , _snake_case : str , _snake_case : int , _snake_case : bool = False ): """simple docstring""" __a =torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __a ='cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __a ='cpu' __a =StableDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=_snake_case ).to(_snake_case ) __a =Path(_snake_case ) # TEXT ENCODER __a =pipeline.text_encoder.config.max_position_embeddings __a =pipeline.text_encoder.config.hidden_size __a =pipeline.tokenizer( 'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=_snake_case , return_tensors='pt' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_snake_case , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'sequence'}, } , opset=_snake_case , ) del pipeline.text_encoder # UNET __a =pipeline.unet.config.in_channels __a =pipeline.unet.config.sample_size __a =output_path / 'unet' / 'model.onnx' onnx_export( pipeline.unet , model_args=( torch.randn(2 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), torch.randn(2 ).to(device=_snake_case , dtype=_snake_case ), torch.randn(2 , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=_snake_case , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'timestep': {0: 'batch'}, 'encoder_hidden_states': {0: 'batch', 1: 'sequence'}, } , opset=_snake_case , use_external_data_format=_snake_case , ) __a =str(unet_path.absolute().as_posix() ) __a =os.path.dirname(_snake_case ) __a =onnx.load(_snake_case ) # clean up existing tensor files shutil.rmtree(_snake_case ) os.mkdir(_snake_case ) # collate external tensor files into 
one onnx.save_model( _snake_case , _snake_case , save_as_external_data=_snake_case , all_tensors_to_one_file=_snake_case , location='weights.pb' , convert_attribute=_snake_case , ) del pipeline.unet # VAE ENCODER __a =pipeline.vae __a =vae_encoder.config.in_channels __a =vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder __a =lambda _snake_case , _snake_case : vae_encoder.encode(_snake_case , _snake_case )[0].sample() onnx_export( _snake_case , model_args=( torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={ 'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=_snake_case , ) # VAE DECODER __a =pipeline.vae __a =vae_decoder.config.latent_channels __a =vae_decoder.config.out_channels # forward only through the decoder part __a =vae_encoder.decode onnx_export( _snake_case , model_args=( torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=_snake_case , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: __a =pipeline.safety_checker __a =safety_checker.config.vision_config.num_channels __a =safety_checker.config.vision_config.image_size __a =safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , _snake_case , _snake_case , _snake_case , ).to(device=_snake_case , dtype=_snake_case ), torch.randn(1 , _snake_case , _snake_case , _snake_case ).to(device=_snake_case , dtype=_snake_case ), ) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={ 'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, 'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'}, } , opset=_snake_case , ) del pipeline.safety_checker __a =OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' ) __a =pipeline.feature_extractor else: __a =None __a =None __a =OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=_snake_case , feature_extractor=_snake_case , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(_snake_case ) print('ONNX pipeline saved to' , _snake_case ) del pipeline del onnx_pipeline __a =OnnxStableDiffusionPipeline.from_pretrained(_snake_case , provider='CPUExecutionProvider' ) print('ONNX pipeline is loadable' ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( 
"--opset", default=14, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") _lowerCAmelCase : List[str] = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __magic_name__ : def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=False , __snake_case=False , __snake_case=False , __snake_case=2 , __snake_case=99 , __snake_case=0 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=2 , __snake_case=4 , __snake_case="last" , __snake_case=True , __snake_case=None , __snake_case=0 , ) -> Optional[Any]: '''simple docstring''' __a =parent __a =batch_size __a =seq_length __a =is_training __a =use_input_lengths __a =use_token_type_ids __a =use_labels __a =gelu_activation __a =sinusoidal_embeddings __a =causal __a =asm __a =n_langs __a =vocab_size __a =n_special __a =hidden_size __a =num_hidden_layers __a =num_attention_heads __a =hidden_dropout_prob __a =attention_probs_dropout_prob __a =max_position_embeddings __a =type_sequence_label_size __a =initializer_range __a =num_labels __a =num_choices __a =summary_type __a =use_proj __a =scope __a =bos_token_id def __magic_name__ ( self ) -> Any: '''simple docstring''' __a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a =random_attention_mask([self.batch_size, self.seq_length] ) __a =None if self.use_input_lengths: __a =( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __a =None if self.use_token_type_ids: __a =ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __a =None __a =None __a =None if self.use_labels: __a =ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a =ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a =ids_tensor([self.batch_size] , 2 ).float() __a =ids_tensor([self.batch_size] , self.num_choices ) __a =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __magic_name__ ( self ) -> Any: '''simple docstring''' return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]: '''simple docstring''' __a =XLMModel(config=__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , lengths=__snake_case , langs=__snake_case ) __a =model(__snake_case , langs=__snake_case ) __a =model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[int]: '''simple docstring''' __a =XLMWithLMHeadModel(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Dict: '''simple docstring''' __a =XLMForQuestionAnsweringSimple(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case ) __a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case ) __a =outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> List[Any]: '''simple docstring''' __a =XLMForQuestionAnswering(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case ) __a =model( __snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , p_mask=__snake_case , ) __a =model( __snake_case , start_positions=__snake_case , end_positions=__snake_case , cls_index=__snake_case , is_impossible=__snake_case , ) ((__a) , ) =result_with_labels.to_tuple() __a =model(__snake_case , start_positions=__snake_case , end_positions=__snake_case ) ((__a) , ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]: '''simple docstring''' __a =XLMForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case ) __a =model(__snake_case , labels=__snake_case ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , 
__snake_case , __snake_case , ) -> Any: '''simple docstring''' __a =self.num_labels __a =XLMForTokenClassification(__snake_case ) model.to(__snake_case ) model.eval() __a =model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Tuple: '''simple docstring''' __a =self.num_choices __a =XLMForMultipleChoice(config=__snake_case ) model.to(__snake_case ) model.eval() __a =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a =model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__ ( self ) -> Union[str, Any]: '''simple docstring''' __a =self.prepare_config_and_inputs() ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) =config_and_inputs __a ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable SCREAMING_SNAKE_CASE = ( { 'feature-extraction': XLMModel, 'fill-mask': XLMWithLMHeadModel, 'question-answering': XLMForQuestionAnsweringSimple, 'text-classification': XLMForSequenceClassification, 'text-generation': XLMWithLMHeadModel, 'token-classification': XLMForTokenClassification, 'zero-shot': XLMForSequenceClassification, } if is_torch_available() else {} ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) -> int: '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __magic_name__ ( self , __snake_case , __snake_case , __snake_case=False ) -> str: '''simple docstring''' __a =super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": __a =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case ) __a =torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case ) return inputs_dict def __magic_name__ ( self ) -> Tuple: '''simple docstring''' __a =XLMModelTester(self ) __a =ConfigTester(self , config_class=__snake_case , emb_dim=37 ) def __magic_name__ ( self ) -> str: '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self ) -> List[str]: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__snake_case ) def __magic_name__ ( self ) -> List[str]: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__snake_case ) def __magic_name__ ( self ) -> List[str]: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__snake_case ) def __magic_name__ ( self ) -> Optional[int]: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__snake_case ) def __magic_name__ ( self ) -> Any: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__snake_case ) def __magic_name__ ( self ) -> Optional[int]: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__snake_case ) def __magic_name__ ( self ) -> Any: '''simple docstring''' __a =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__snake_case ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Optional[Any]: '''simple docstring''' self.assertIsInstance(__snake_case , __snake_case ) self.assertListEqual( [isinstance(__snake_case , __snake_case ) for iter_attentions in attentions] , [True] * len(__snake_case ) ) self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__snake_case ): # adds PAD dummy token __a =min_length + idx + 1 __a =min_length + idx + 1 __a =( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__snake_case ) ) def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=False , __snake_case=1 ) -> Dict: '''simple docstring''' self.assertIsInstance(__snake_case , __snake_case ) self.assertListEqual( [isinstance(__snake_case , __snake_case ) for iter_hidden_states in hidden_states] , [True] * len(__snake_case ) , ) self.assertEqual(len(__snake_case ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__snake_case ): # adds PAD dummy token __a =min_length + idx + 1 
__a =(batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__snake_case ) , ) pass @slow def __magic_name__ ( self ) -> Union[str, Any]: '''simple docstring''' for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a =XLMModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) @require_torch class __magic_name__ ( unittest.TestCase ): @slow def __magic_name__ ( self ) -> Tuple: '''simple docstring''' __a =XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' ) model.to(__snake_case ) __a =torch.tensor([[14, 447]] , dtype=torch.long , device=__snake_case ) # the president __a =[ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference __a =model.generate(__snake_case , do_sample=__snake_case ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __snake_case )
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _snake_case = 16 _snake_case = 32 def lowerCAmelCase_ ( snake_case_,snake_case_ = 16 ): _A : List[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _A : Any = load_dataset("""glue""","""mrpc""" ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) _A : int = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=snake_case_,max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _A : Dict = datasets.map( snake_case_,batched=snake_case_,remove_columns=["""idx""", """sentence1""", """sentence2"""],) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _A : List[Any] = tokenized_datasets.rename_column("""label""","""labels""" ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. _A : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _A : Any = 16 elif accelerator.mixed_precision != "no": _A : int = 8 else: _A : str = None return tokenizer.pad( snake_case_,padding="""longest""",max_length=snake_case_,pad_to_multiple_of=snake_case_,return_tensors="""pt""",) # Instantiate dataloaders. _A : Dict = DataLoader( tokenized_datasets["""train"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ ) _A : str = DataLoader( tokenized_datasets["""validation"""],shuffle=snake_case_,collate_fn=snake_case_,batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _snake_case = mocked_dataloaders # noqa: F811 def lowerCAmelCase_ ( snake_case_,snake_case_ ): # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""",snake_case_ ) == "1": _A : Optional[Any] = 2 # New Code # _A : str = int(args.gradient_accumulation_steps ) # Initialize accelerator _A : Dict = Accelerator( cpu=args.cpu,mixed_precision=args.mixed_precision,gradient_accumulation_steps=snake_case_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( """Gradient accumulation on TPUs is currently not supported. 
Pass `gradient_accumulation_steps=1`""" ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _A : int = config["""lr"""] _A : str = int(config["""num_epochs"""] ) _A : Tuple = int(config["""seed"""] ) _A : Optional[Any] = int(config["""batch_size"""] ) _A : int = evaluate.load("""glue""","""mrpc""" ) set_seed(snake_case_ ) _A , _A : Dict = get_dataloaders(snake_case_,snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _A : Dict = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""",return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _A : List[Any] = model.to(accelerator.device ) # Instantiate optimizer _A : Any = AdamW(params=model.parameters(),lr=snake_case_ ) # Instantiate scheduler _A : Optional[Any] = get_linear_schedule_with_warmup( optimizer=snake_case_,num_warmup_steps=100,num_training_steps=(len(snake_case_ ) * num_epochs),) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _A , _A , _A , _A , _A : List[Any] = accelerator.prepare( snake_case_,snake_case_,snake_case_,snake_case_,snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(snake_case_ ): _A : List[Any] = model(**snake_case_ ) _A : Dict = output.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _A : Dict = model(**snake_case_ ) _A : Any = outputs.logits.argmax(dim=-1 ) _A , _A : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=snake_case_,references=snake_case_,) _A : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''',snake_case_ ) def lowerCAmelCase_ ( ): _A : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""",type=snake_case_,default=snake_case_,choices=["""no""", """fp16""", """bf16""", """fp8"""],help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10. """
        """and an Nvidia Ampere GPU.""",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
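# A hypothetical launch command (the script file name is a placeholder); the flags
# map to the argparse options defined above:
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4 --mixed_precision fp16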
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs  # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
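# A minimal usage sketch (an assumption, not part of the original file): the early-exit
# classifier is built from a standard RobertaConfig like any transformers model.
#
#   from transformers import RobertaConfig
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   # outputs = model(input_ids, attention_mask=attention_mask, labels=labels)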
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( unittest.TestCase ): """simple docstring""" def __init__( self , lowercase , lowercase=3 , lowercase=32 , lowercase=3 , lowercase=10 , lowercase=[10, 20, 30, 40] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ) -> Optional[Any]: '''simple docstring''' a__ : List[Any] = parent a__ : List[str] = batch_size a__ : Dict = image_size a__ : Any = num_channels a__ : Union[str, Any] = embeddings_size a__ : Dict = hidden_sizes a__ : List[str] = depths a__ : int = is_training a__ : Tuple = use_labels a__ : List[Any] = hidden_act a__ : Tuple = num_labels a__ : Optional[int] = scope a__ : str = len(lowercase) def __lowercase ( self) -> int: '''simple docstring''' a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) a__ : List[str] = self.get_config() return config, pixel_values def __lowercase ( self) -> int: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __lowercase ( self , lowercase , lowercase) -> Optional[int]: '''simple docstring''' a__ : int = FlaxRegNetModel(config=lowercase) a__ : Tuple = model(lowercase) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase ( self , lowercase , lowercase) -> str: '''simple docstring''' a__ : Any = self.num_labels a__ : int = FlaxRegNetForImageClassification(config=lowercase) a__ : str = model(lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : List[str] = self.prepare_config_and_inputs() a__ : Any = config_and_inputs a__ : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __A : Dict = False __A : str = False __A : Optional[int] = False def __lowercase ( self) -> None: '''simple docstring''' a__ : Dict = FlaxRegNetModelTester(self) a__ : Optional[int] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase) def __lowercase ( self) -> str: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() 
self.config_tester.check_config_arguments_init() def __lowercase ( self) -> Optional[int]: '''simple docstring''' return def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase) def __lowercase ( self) -> List[str]: '''simple docstring''' a__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase) @unittest.skip(reason='RegNet does not use inputs_embeds') def __lowercase ( self) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='RegNet does not support input and output embeddings') def __lowercase ( self) -> List[str]: '''simple docstring''' pass def __lowercase ( self) -> Optional[Any]: '''simple docstring''' a__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : Dict = model_class(lowercase) a__ : List[str] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic a__ : List[str] = [*signature.parameters.keys()] a__ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase) def __lowercase ( self) -> str: '''simple docstring''' def check_hidden_states_output(lowercase , lowercase , lowercase): a__ : str = model_class(lowercase) a__ : str = model(**self._prepare_for_class(lowercase , lowercase)) a__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase) , expected_num_stages + 1) a__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a__ : List[str] = True check_hidden_states_output(lowercase , lowercase , lowercase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a__ : Optional[Any] = True check_hidden_states_output(lowercase , lowercase , lowercase) def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): a__ : List[str] = self._prepare_for_class(lowercase , lowercase) a__ : str = model_class(lowercase) @jax.jit def model_jitted(lowercase , **lowercase): return model(pixel_values=lowercase , **lowercase) with self.subTest('JIT Enabled'): a__ : int = model_jitted(**lowercase).to_tuple() with self.subTest('JIT Disabled'): with jax.disable_jit(): a__ : int = model_jitted(**lowercase).to_tuple() self.assertEqual(len(lowercase) , len(lowercase)) for jitted_output, output in zip(lowercase , lowercase): self.assertEqual(jitted_output.shape , output.shape) def A_ ( ) -> Dict: a__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_flax class A__ ( unittest.TestCase ): """simple docstring""" @cached_property def __lowercase ( self) -> Optional[Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained('facebook/regnet-y-040') if is_vision_available() else None @slow def __lowercase ( self) -> List[Any]: '''simple docstring''' a__ : Union[str, Any] = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040') a__ : Union[str, Any] = self.default_image_processor a__ : Optional[int] = prepare_img() a__ : List[str] = image_processor(images=lowercase , return_tensors='np') a__ : 
Dict = model(**lowercase)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import os
import pickle
import sys

import torch

from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()

# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils


def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)

        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
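# A hypothetical command-line invocation converting a pre-processed corpus pickle
# (the script name and paths are placeholders, not from the source):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --transfo_xl_dataset_file ./corpus.pkl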
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns sinusoidal timestep embeddings of shape [len(timesteps), embedding_dim]."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
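# A minimal usage sketch (an assumption, not part of the original file): embedding a
# batch of diffusion timesteps with the helper above.
#
#   import jax.numpy as jnp
#   timesteps = jnp.array([0.0, 10.0, 500.0])
#   emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
#   assert emb.shape == (3, 32)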
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json", } class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 'instructblip_vision_model' def __init__( self , __snake_case=1408 , __snake_case=6144 , __snake_case=39 , __snake_case=16 , __snake_case=224 , __snake_case=14 , __snake_case="gelu" , __snake_case=1e-6 , __snake_case=0.0 , __snake_case=1e-10 , __snake_case=True , **__snake_case , ) -> str: '''simple docstring''' super().__init__(**__snake_case ) __a =hidden_size __a =intermediate_size __a =num_hidden_layers __a =num_attention_heads __a =patch_size __a =image_size __a =initializer_range __a =attention_dropout __a =layer_norm_eps __a =hidden_act __a =qkv_bias @classmethod def __magic_name__ ( cls , __snake_case , **__snake_case ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__snake_case ) __a , __a =cls.get_config_dict(__snake_case , **__snake_case ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __a =config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(__snake_case , **__snake_case ) class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 'instructblip_qformer' def __init__( self , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=2 , __snake_case=1408 , **__snake_case , ) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=__snake_case , **__snake_case ) __a =vocab_size __a =hidden_size __a =num_hidden_layers __a =num_attention_heads __a =hidden_act __a =intermediate_size __a =hidden_dropout_prob __a =attention_probs_dropout_prob __a =max_position_embeddings __a =initializer_range __a =layer_norm_eps __a =position_embedding_type __a =cross_attention_frequency __a =encoder_hidden_size @classmethod def __magic_name__ ( cls , __snake_case , **__snake_case ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__snake_case ) __a , __a =cls.get_config_dict(__snake_case , **__snake_case ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __a =config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__snake_case , **__snake_case ) class __magic_name__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE = 'instructblip' SCREAMING_SNAKE_CASE = True def __init__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=32 , **__snake_case ) -> str: '''simple docstring''' super().__init__(**__snake_case ) if vision_config is None: __a ={} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: __a ={} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __a ={} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' ) __a =InstructBlipVisionConfig(**__snake_case ) __a =InstructBlipQFormerConfig(**__snake_case ) __a =text_config['model_type'] if 'model_type' in text_config else 'opt' __a =CONFIG_MAPPING[text_model_type](**__snake_case ) __a =self.text_config.tie_word_embeddings __a =self.text_config.is_encoder_decoder __a =num_query_tokens __a =self.vision_config.hidden_size __a =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __a =1.0 __a =0.02 @classmethod def __magic_name__ ( cls , __snake_case , __snake_case , __snake_case , **__snake_case , ) -> Optional[Any]: '''simple docstring''' return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__snake_case , ) def __magic_name__ ( self ) -> List[Any]: '''simple docstring''' __a =copy.deepcopy(self.__dict__ ) __a =self.vision_config.to_dict() __a =self.qformer_config.to_dict() __a =self.text_config.to_dict() __a =self.__class__.model_type return output
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :Any = ["input_features", "is_longer"] def __init__( self , __A=64 , __A=4_8000 , __A=480 , __A=10 , __A=1024 , __A=0.0 , __A=False , __A = 0 , __A = 1_4000 , __A = None , __A = "fusion" , __A = "repeatpad" , **__A , ) -> int: super().__init__( feature_size=__A , sampling_rate=__A , padding_value=__A , return_attention_mask=__A , **__A , ) lowerCAmelCase_ :Optional[int] = top_db lowerCAmelCase_ :str = truncation lowerCAmelCase_ :Any = padding lowerCAmelCase_ :int = fft_window_size lowerCAmelCase_ :int = (fft_window_size >> 1) + 1 lowerCAmelCase_ :Any = hop_length lowerCAmelCase_ :int = max_length_s lowerCAmelCase_ :Dict = max_length_s * sampling_rate lowerCAmelCase_ :str = sampling_rate lowerCAmelCase_ :int = frequency_min lowerCAmelCase_ :Optional[int] = frequency_max lowerCAmelCase_ :int = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__A , min_frequency=__A , max_frequency=__A , sampling_rate=__A , norm=__A , mel_scale="""htk""" , ) lowerCAmelCase_ :List[str] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__A , min_frequency=__A , max_frequency=__A , sampling_rate=__A , norm="""slaney""" , mel_scale="""slaney""" , ) def __lowerCAmelCase ( self ) -> Dict[str, Any]: lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ :Tuple = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __lowerCAmelCase ( self , __A , __A = None ) -> np.ndarray: lowerCAmelCase_ :Tuple = spectrogram( __A , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__A , log_mel="""dB""" , ) return log_mel_spectrogram.T def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]: lowerCAmelCase_ :int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase_ :List[str] = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk lowerCAmelCase_ :Any = [0] # randomly choose index for each part lowerCAmelCase_ :Tuple = np.random.choice(ranges[0] ) lowerCAmelCase_ :List[Any] = np.random.choice(ranges[1] ) lowerCAmelCase_ :str = np.random.choice(ranges[2] ) lowerCAmelCase_ :str = mel[idx_front : idx_front + chunk_frames, :] lowerCAmelCase_ :List[Any] = mel[idx_middle : idx_middle + chunk_frames, :] lowerCAmelCase_ :int = mel[idx_back : idx_back + chunk_frames, :] lowerCAmelCase_ :Union[str, Any] = torch.tensor(mel[None, None, :] ) lowerCAmelCase_ :Dict = torch.nn.functional.interpolate( __A , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=__A ) lowerCAmelCase_ :List[str] = mel_shrink[0][0].numpy() lowerCAmelCase_ :Optional[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __lowerCAmelCase ( self , __A , __A , __A , __A ) -> np.array: if waveform.shape[0] > max_length: if 
truncation == "rand_trunc": lowerCAmelCase_ :Union[str, Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad lowerCAmelCase_ :List[Any] = len(__A ) - max_length lowerCAmelCase_ :Any = np.random.randint(0 , overflow + 1 ) lowerCAmelCase_ :Optional[int] = waveform[idx : idx + max_length] lowerCAmelCase_ :Optional[Any] = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :] elif truncation == "fusion": lowerCAmelCase_ :List[str] = self._np_extract_fbank_features(__A , self.mel_filters ) lowerCAmelCase_ :Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed lowerCAmelCase_ :Optional[int] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. lowerCAmelCase_ :Optional[int] = np.stack([mel, mel, mel, mel] , axis=0 ) lowerCAmelCase_ :Dict = False else: lowerCAmelCase_ :Tuple = self._random_mel_fusion(__A , __A , __A ) lowerCAmelCase_ :Dict = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: lowerCAmelCase_ :Union[str, Any] = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": lowerCAmelCase_ :Optional[Any] = int(max_length / len(__A ) ) lowerCAmelCase_ :List[Any] = np.stack(np.tile(__A , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": lowerCAmelCase_ :Optional[int] = int(max_length / len(__A ) ) lowerCAmelCase_ :int = np.stack(np.tile(__A , __A ) ) lowerCAmelCase_ :List[Any] = np.pad(__A , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 ) if truncation == "fusion": lowerCAmelCase_ :int = self._np_extract_fbank_features(__A , self.mel_filters ) lowerCAmelCase_ :Any = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: lowerCAmelCase_ :int = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , **__A , ) -> BatchFeature: lowerCAmelCase_ :Union[str, Any] = truncation if truncation is not None else self.truncation lowerCAmelCase_ :Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) lowerCAmelCase_ :Optional[Any] = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowerCAmelCase_ :Optional[Any] = is_batched_numpy or ( isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowerCAmelCase_ :int = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__A , np.ndarray ): lowerCAmelCase_ :List[str] = np.asarray(__A , dtype=np.floataa ) elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowerCAmelCase_ :int = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowerCAmelCase_ :Optional[int] = [np.asarray(__A )] # convert to mel spectrogram, truncate and pad if needed. lowerCAmelCase_ :Tuple = [ self._get_input_mel(__A , max_length if max_length else self.nb_max_samples , __A , __A ) for waveform in raw_speech ] lowerCAmelCase_ :Dict = [] lowerCAmelCase_ :Tuple = [] for mel, longer in padded_inputs: input_mel.append(__A ) is_longer.append(__A ) if truncation == "fusion" and sum(__A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer lowerCAmelCase_ :str = np.random.randint(0 , len(__A ) ) lowerCAmelCase_ :List[str] = True if isinstance(input_mel[0] , __A ): lowerCAmelCase_ :str = [np.asarray(__A , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool lowerCAmelCase_ :Dict = [[longer] for longer in is_longer] lowerCAmelCase_ :List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer} lowerCAmelCase_ :Tuple = BatchFeature(__A ) if return_tensors is not None: lowerCAmelCase_ :Optional[int] = input_features.convert_to_tensors(__A ) return input_features
"""simple docstring""" def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int: '''simple docstring''' if index == number_of_items: return 0 lowerCAmelCase_ :Any = 0 lowerCAmelCase_ :str = 0 lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 ) if weights[index] <= max_weight: lowerCAmelCase_ :str = values[index] + knapsack( lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 ) return max(lowercase__ , lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """
    Formula for the amortization amount per month:
    A = p * r * (1 + r)^n / ((1 + r)^n - 1)
    where p is the principal, r the interest rate per month and n the number of payments.
    """
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get the monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get the number of payments, as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
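# A worked example (computed from the formula above, not taken from the source):
# borrowing 25000 at 12% per annum over 3 years gives a monthly rate of 0.01 and
# 36 payments, so the EMI is about 830.36.
#
#   emi = equated_monthly_installments(25000, 0.12, 3)
#   assert abs(emi - 830.36) < 0.01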
"""Tests for gradient synchronization and gradient accumulation in `accelerate`."""
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test that the `no_sync` context manager is a noop on a single process
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test that the `no_sync` context manager actually skips gradient sync on a distributed setup
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
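# --- Usage sketch (added for illustration; not part of the original test script) ---
# The tests above validate `Accelerator.accumulate`. This is a minimal sketch of
# the training-loop pattern under test, reusing the same RegressionModel and
# RegressionDataset helpers; nothing here is prescribed by the test file itself.
import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel


def accumulate_example():
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model = RegressionModel()
    optimizer = AdamW(model.parameters(), lr=1e-3)
    dataloader = DataLoader(RegressionDataset(length=80), batch_size=16)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch in dataloader:
        # Inside `accumulate`, gradient sync and the optimizer step are skipped
        # until `gradient_accumulation_steps` batches have been processed.
        with accelerator.accumulate(model):
            inputs, targets = batch.values()
            loss = F.mse_loss(model(inputs), targets.to(accelerator.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()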
"""Checks that each transformers `__init__.py` defines the same objects in its `_import_structure` and its `TYPE_CHECKING` halves."""
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between `_import_structure` objects and `TYPE_CHECKING` objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Checks that every submodule is registered in the main init of Transformers."""
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
def naive_pattern_search(s: str, pattern: str) -> list:
    """
    Returns the starting index of every occurrence of `pattern` in `s`,
    found by brute-force comparison at each offset.
    """
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
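# --- Added cross-check (illustrative): the scan above is the
# O(len(s) * len(pattern)) brute-force search. Python's built-in
# `str.startswith` offers an easy oracle to validate it against.
if __name__ == "__main__":
    text, pat = "ABAAABCDBBABCDDEBCABC", "ABC"
    expected = [i for i in range(len(text)) if text.startswith(pat, i)]
    assert naive_pattern_search(text, pat) == expected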
"""simple docstring""" import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class lowerCAmelCase_ (a__ ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : List[Any] = config_class SCREAMING_SNAKE_CASE__ : int = has_text_modality SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs SCREAMING_SNAKE_CASE__ : Optional[Any] = common_properties def __magic_name__ (self ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = self.config_class(**self.inputs_dict ) SCREAMING_SNAKE_CASE__ : Dict = ( ["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(["""vocab_size"""] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , msg=F'''`{prop}` does not exist''' ) # Test that config has the common properties as setter for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ): try: setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual( getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=F'''`{name} value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ): try: SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=F'''`{name} value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = self.config_class(**self.inputs_dict ) SCREAMING_SNAKE_CASE__ : Any = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""" ) config_first.to_json_file(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = self.config_class.from_json_file(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __magic_name__ (self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**self.inputs_dict ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = """test""" with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) config_first.save_pretrained(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Any = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def __magic_name__ (self ) -> Any: """simple docstring""" if self.config_class.is_composition: return SCREAMING_SNAKE_CASE__ : str = self.config_class() self.parent.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def __magic_name__ (self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : str = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) ) elif getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) != value: 
wrong_values.append((key, getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), value) ) if len(SCREAMING_SNAKE_CASE__ ) > 0: SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] ) raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' ) def __magic_name__ (self ) -> Optional[Any]: """simple docstring""" self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
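# --- Usage sketch (added; not part of the original test helper) ---
# A model's test suite typically holds a ConfigTester and drives it from a
# unittest method. BertConfig is used here purely as an illustrative example.
import unittest

from transformers import BertConfig


class BertConfigTest(unittest.TestCase):
    def test_config(self):
        config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
        config_tester.run_common_tests()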
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : List[str] = '.' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCAmelCase__ : List[Any] = [ 'Assert', 'AssignVariableOp', 'EmptyTensorList', 'MergeV2Checkpoints', 'ReadVariableOp', 'ResourceGather', 'RestoreV2', 'SaveV2', 'ShardedFilename', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'VarHandleOp', ] def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = SavedModel() SCREAMING_SNAKE_CASE__ : Dict = [] with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f: SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""] for i in range(1 ,opset + 1 ): onnx_ops.extend(onnx_opsets[str(_snake_case )] ) with open(_snake_case ,"""rb""" ) as f: saved_model.ParseFromString(f.read() ) SCREAMING_SNAKE_CASE__ : List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(_snake_case ) if strict and len(_snake_case ) > 0: raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(_snake_case ) > 0: print(f'''Found the following incompatible ops for the opset {opset}:''' ) print(*_snake_case ,sep="""\n""" ) else: print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).') parser.add_argument( '--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.' ) parser.add_argument( '--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.' ) parser.add_argument( '--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)' ) UpperCAmelCase__ : Dict = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase_ ( lowerCAmelCase: int )-> Dict: _snake_case : Tuple = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2] _snake_case : str = True if 'large' in model_name or 'huge' in model_name else False _snake_case : Tuple = True if 'large' in model_name or 'huge' in model_name else False _snake_case : Optional[Any] = True if 'large' in model_name or 'huge' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: _snake_case : Optional[Any] = [3, 3, 3, 3] _snake_case : Any = [5, 5, 5, 5] elif "fl4" in model_name: _snake_case : Optional[Any] = [4, 4, 4, 4] _snake_case : Optional[int] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: _snake_case : Tuple = [3, 3, 3, 3] if "lrf" in model_name: _snake_case : List[Any] = [3, 3, 3, 3] else: _snake_case : List[Any] = [2, 2, 2, 2] if "tiny" in model_name: _snake_case : Any = 96 elif "small" in model_name: _snake_case : Tuple = 96 elif "base" in model_name: _snake_case : Optional[int] = 1_28 elif "large" in model_name: _snake_case : Any = 1_92 elif "xlarge" in model_name: _snake_case : Tuple = 2_56 elif "huge" in model_name: _snake_case : str = 3_52 # set label information _snake_case : Optional[int] = 'huggingface/label-files' if "large" in model_name or "huge" in model_name: _snake_case : List[Any] = 'imagenet-22k-id2label.json' else: _snake_case : Union[str, Any] = 'imagenet-1k-id2label.json' _snake_case : Optional[Any] = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='dataset' ) , 'r' ) ) _snake_case : Optional[int] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} _snake_case : List[Any] = {v: k for k, v in idalabel.items()} _snake_case : Union[str, Any] = FocalNetConfig( embed_dim=__lowerCamelCase , depths=__lowerCamelCase , focal_levels=__lowerCamelCase , focal_windows=__lowerCamelCase , use_conv_embed=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , use_post_layernorm=__lowerCamelCase , use_layerscale=__lowerCamelCase , ) return config def lowerCamelCase_ ( lowerCAmelCase: Any )-> List[str]: if "patch_embed.proj" in name: _snake_case : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: _snake_case : str = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: _snake_case : Dict = 'encoder.' 
+ name if "encoder.layers" in name: _snake_case : List[Any] = name.replace('encoder.layers' , 'encoder.stages' ) if "downsample.proj" in name: _snake_case : Tuple = name.replace('downsample.proj' , 'downsample.projection' ) if "blocks" in name: _snake_case : str = name.replace('blocks' , 'layers' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: _snake_case : Dict = name.replace('modulation.f' , 'modulation.projection_in' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: _snake_case : List[str] = name.replace('modulation.h' , 'modulation.projection_context' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: _snake_case : List[Any] = name.replace('modulation.proj' , 'modulation.projection_out' ) if name == "norm.weight": _snake_case : Optional[Any] = 'layernorm.weight' if name == "norm.bias": _snake_case : str = 'layernorm.bias' if "head" in name: _snake_case : int = name.replace('head' , 'classifier' ) else: _snake_case : str = 'focalnet.' + name return name def lowerCamelCase_ ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Any , lowerCAmelCase: int=False )-> List[Any]: # fmt: off _snake_case : Optional[int] = { 'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth', 'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth', 'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth', 'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth', 'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth', 'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth', 'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth', 'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth', 'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth', 'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth', } # fmt: on _snake_case : Dict = model_name_to_url[model_name] print('Checkpoint URL: ' , __lowerCamelCase ) _snake_case : Optional[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location='cpu' )['model'] # rename keys for key in state_dict.copy().keys(): _snake_case : Union[str, Any] = state_dict.pop(__lowerCamelCase ) _snake_case : List[Any] = val _snake_case : Tuple = get_focalnet_config(__lowerCamelCase ) _snake_case : Optional[int] = FocalNetForImageClassification(__lowerCamelCase ) model.eval() # load state dict model.load_state_dict(__lowerCamelCase ) # verify conversion _snake_case : str = 'http://images.cocodataset.org/val2017/000000039769.jpg' _snake_case : str = BitImageProcessor( do_resize=__lowerCamelCase , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=__lowerCamelCase , crop_size=2_24 , do_normalize=__lowerCamelCase , image_mean=__lowerCamelCase , image_std=__lowerCamelCase , ) _snake_case : Any = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) _snake_case : Dict = processor(images=__lowerCamelCase , 
return_tensors='pt' ) _snake_case : Union[str, Any] = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ), ] ) _snake_case : Tuple = image_transforms(__lowerCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , __lowerCamelCase , atol=1E-4 ) _snake_case : Dict = model(**__lowerCamelCase ) _snake_case : int = outputs.logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) print('First values of logits:' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": _snake_case : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ) elif model_name == "focalnet-tiny-lrf": _snake_case : List[Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] ) elif model_name == "focalnet-small": _snake_case : Optional[Any] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] ) elif model_name == "focalnet-small-lrf": _snake_case : int = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] ) elif model_name == "focalnet-base": _snake_case : Union[str, Any] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] ) elif model_name == "focalnet-base-lrf": _snake_case : str = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] ) assert torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(F"""Pushing model and processor of {model_name} to the hub...""" ) model.push_to_hub(F"""{model_name}""" ) processor.push_to_hub(F"""{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""focalnet-tiny""", type=str, help="""Name of the FocalNet model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub.""", ) lowerCAmelCase_ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
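# --- Added note (illustrative): how `rename_key` maps original FocalNet
# checkpoint keys onto the transformers naming scheme, e.g.:
#   rename_key("patch_embed.proj.weight")
#       -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   rename_key("layers.0.blocks.1.modulation.f.weight")
#       -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
#   rename_key("head.weight") -> "classifier.weight"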
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts `sequence[start..end]` (both inclusive) in place via slowsort.
    `start` defaults to 0 and `end` to `len(sequence) - 1` if not given.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]; slowsort(seq); seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    # After both halves are sorted, the maximum is either at `mid` or `end`;
    # put it at `end`, then slowsort everything before it again.
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
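# --- Added note: slowsort is a deliberately pessimal "multiply and surrender"
# algorithm (from Broder & Stolfi's "Pessimal Algorithms" paper); its running
# time grows faster than any polynomial, so it is a teaching joke, not a tool.
# Quick demonstration:
if __name__ == "__main__":
    data = [5, 3, 1, 4, 2]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]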
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase): @register_to_config def __init__( self: Optional[Any] , UpperCamelCase_: bool , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None ): super().__init__() __lowerCamelCase = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" __lowerCamelCase = torch.zeros(UpperCamelCase_ , UpperCamelCase_ ) else: __lowerCamelCase = None __lowerCamelCase = torch.nn.Parameter(UpperCamelCase_ ) class lowerCamelCase__( __lowerCamelCase): UpperCAmelCase__ : VQModel UpperCAmelCase__ : CLIPTextModel UpperCAmelCase__ : CLIPTokenizer UpperCAmelCase__ : TransformeraDModel UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings UpperCAmelCase__ : VQDiffusionScheduler def __init__( self: str , UpperCamelCase_: VQModel , UpperCamelCase_: CLIPTextModel , UpperCamelCase_: CLIPTokenizer , UpperCamelCase_: TransformeraDModel , UpperCamelCase_: VQDiffusionScheduler , UpperCamelCase_: LearnedClassifierFreeSamplingEmbeddings , ): super().__init__() self.register_modules( vqvae=UpperCamelCase_ , transformer=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , scheduler=UpperCamelCase_ , learned_classifier_free_sampling_embeddings=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any , UpperCamelCase_: List[Any] , UpperCamelCase_: Any ): __lowerCamelCase = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1 # get prompt text embeddings __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __lowerCamelCase = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F' {self.tokenizer.model_max_length} tokens: {removed_text}' ) __lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length] __lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 __lowerCamelCase = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate text embeddings for each generation per prompt __lowerCamelCase = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: __lowerCamelCase = self.learned_classifier_free_sampling_embeddings.embeddings __lowerCamelCase = negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase_ , 1 , 1 ) else: __lowerCamelCase = [""""""] * batch_size __lowerCamelCase = text_input_ids.shape[-1] __lowerCamelCase = self.tokenizer( UpperCamelCase_ , padding="""max_length""" , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors="""pt""" , ) __lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings __lowerCamelCase = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCamelCase = negative_prompt_embeds.shape[1] __lowerCamelCase = negative_prompt_embeds.repeat(1 , UpperCamelCase_ , 1 ) __lowerCamelCase = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCamelCase = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self: Tuple , UpperCamelCase_: Union[str, List[str]] , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 5.0 , UpperCamelCase_: float = 1.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , UpperCamelCase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase_: int = 1 , ): if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = 1 elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowerCamelCase = len(UpperCamelCase_ ) else: raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}' ) __lowerCamelCase = batch_size * num_images_per_prompt __lowerCamelCase = guidance_scale > 1.0 __lowerCamelCase = self._encode_prompt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(UpperCamelCase_ )}.' ) # get the initial completely masked latents unless the user supplied it __lowerCamelCase = (batch_size, self.transformer.num_latent_pixels) if latents is None: __lowerCamelCase = self.transformer.num_vector_embeds - 1 __lowerCamelCase = torch.full(UpperCamelCase_ , UpperCamelCase_ ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( """Unexpected latents value(s). 
All latents be valid embedding indices i.e. in the range 0,""" F' {self.transformer.num_vector_embeds - 1} (inclusive).' ) __lowerCamelCase = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(UpperCamelCase_ , device=self.device ) __lowerCamelCase = self.scheduler.timesteps.to(self.device ) __lowerCamelCase = latents for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the sample if we are doing classifier free guidance __lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` __lowerCamelCase = self.transformer(UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , timestep=UpperCamelCase_ ).sample if do_classifier_free_guidance: __lowerCamelCase, __lowerCamelCase = model_output.chunk(2 ) __lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(UpperCamelCase_ , dim=1 , keepdim=UpperCamelCase_ ) __lowerCamelCase = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) # remove `log(0)`'s (`-inf`s) __lowerCamelCase = model_output.clamp(-70 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCamelCase = self.scheduler.step(UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) __lowerCamelCase = self.vqvae.config.vq_embed_dim __lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) __lowerCamelCase = self.vqvae.quantize.get_codebook_entry(UpperCamelCase_ , shape=UpperCamelCase_ ) __lowerCamelCase = self.vqvae.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ ).sample __lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 ) __lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ ) def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: float ): __lowerCamelCase, __lowerCamelCase = torch.sort(UpperCamelCase_ , 1 , descending=UpperCamelCase_ ) __lowerCamelCase = torch.exp(UpperCamelCase_ ) __lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out __lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase_ ) __lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 ) __lowerCamelCase = keep_mask[:, :-1, :] __lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) ) __lowerCamelCase = log_p_x_0.clone() __lowerCamelCase = -torch.inf # -inf = log(0) return rv
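The pipeline above appears to be diffusers' VQ-diffusion text-to-image pipeline (`VQDiffusionPipeline`). A minimal usage sketch under that assumption; the checkpoint id and argument values are illustrative, not taken from the code:

# Hedged usage sketch; checkpoint id and argument values are assumptions.
import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# truncation_rate feeds the `truncate` method above: only the most probable
# token log-probabilities are kept at each denoising step.
image = pipe(
    "a teddy bear playing in the pool",
    num_inference_steps=100,
    truncation_rate=1.0,
).images[0]
image.save("vq_diffusion_sample.png")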
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # divide out every factor of i, so ans always holds the largest prime seen
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
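A quick sanity check on the classic worked example (13195 = 5 * 7 * 13 * 29):

# Spot checks for solution() above.
assert solution(13195) == 29  # 13195 = 5 * 7 * 13 * 29
assert solution(17) == 17     # a prime is its own largest prime factor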
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1.0 , __lowerCAmelCase = None , ) -> Union[str, Any]: super().__init__() lowercase__ : Any = initial_learning_rate lowercase__ : str = warmup_steps lowercase__ : Dict = power lowercase__ : int = decay_schedule_fn lowercase__ : Dict = name def __call__( self , __lowerCAmelCase ) -> int: with tf.name_scope(self.name or '''WarmUp''' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. lowercase__ : Union[str, Any] = tf.cast(__lowerCAmelCase , tf.floataa ) lowercase__ : Optional[int] = tf.cast(self.warmup_steps , tf.floataa ) lowercase__ : Dict = global_step_float / warmup_steps_float lowercase__ : Dict = self.initial_learning_rate * tf.math.pow(__lowerCAmelCase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__lowerCAmelCase , ) def _lowerCAmelCase( self ) -> List[str]: return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 0.9 , UpperCAmelCase = 0.9_9_9 , UpperCAmelCase = 1E-8 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 , UpperCAmelCase = None , ): lowercase__ : List[Any] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase , ) if num_warmup_steps: lowercase__ : Optional[Any] = WarmUp( initial_learning_rate=UpperCAmelCase , decay_schedule_fn=UpperCAmelCase , warmup_steps=UpperCAmelCase , ) if weight_decay_rate > 0.0: lowercase__ : str = AdamWeightDecay( learning_rate=UpperCAmelCase , weight_decay_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=UpperCAmelCase , ) else: lowercase__ : Any = tf.keras.optimizers.Adam( learning_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase = 0.0_0_1 , __lowerCAmelCase = 0.9 , __lowerCAmelCase = 0.9_9_9 , __lowerCAmelCase = 1E-7 , __lowerCAmelCase = False , __lowerCAmelCase = 0.0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "AdamWeightDecay" , **__lowerCAmelCase , ) -> str: super().__init__(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) lowercase__ : int = weight_decay_rate lowercase__ : Dict = include_in_weight_decay lowercase__ : List[str] = exclude_from_weight_decay @classmethod def _lowerCAmelCase( cls , __lowerCAmelCase ) -> str: lowercase__ : int = {'''WarmUp''': WarmUp} return super(__lowerCAmelCase , cls ).from_config(__lowerCAmelCase , custom_objects=__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: super(__lowerCAmelCase , self )._prepare_local(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Any = tf.constant( self.weight_decay_rate , name='''adam_weight_decay_rate''' ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : str = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , ) return tf.no_op() def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ) -> List[Any]: lowercase__ , lowercase__ : Any = list(zip(*__lowerCAmelCase ) ) return super(__lowerCAmelCase , self ).apply_gradients(zip(__lowerCAmelCase , __lowerCAmelCase ) , name=__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: if apply_state is None: return self._decayed_lr_t[var_dtype], {} lowercase__ : Union[str, Any] = apply_state or {} lowercase__ : Optional[Any] = apply_state.get((var_device, var_dtype) ) if coefficients is None: lowercase__ : Union[str, Any] = self._fallback_apply_state(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Optional[int] = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> Any: lowercase__ , lowercase__ : Dict = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase ) lowercase__ : List[str] = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with tf.control_dependencies([decay] ): return super(__lowerCAmelCase , self )._resource_apply_dense(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> List[str]: lowercase__ , lowercase__ : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __lowerCAmelCase ) lowercase__ : Union[str, Any] = self._decay_weights_op(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) with tf.control_dependencies([decay] ): return super(__lowerCAmelCase , self )._resource_apply_sparse(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def _lowerCAmelCase( self ) -> List[str]: lowercase__ : Optional[Any] = super().get_config() config.update({'''weight_decay_rate''': self.weight_decay_rate} ) return config def _lowerCAmelCase( self , 
__lowerCAmelCase ) -> List[Any]: if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(__lowerCAmelCase , __lowerCAmelCase ) is not None: return False return True class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self ) -> List[Any]: lowercase__ : List[str] = [] lowercase__ : Optional[int] = None @property def _lowerCAmelCase( self ) -> Optional[Any]: if self._accum_steps is None: lowercase__ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def _lowerCAmelCase( self ) -> Union[str, Any]: if not self._gradients: raise ValueError('''The accumulator should be called first to initialize the gradients''' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , __lowerCAmelCase ) -> Optional[Any]: if not self._gradients: lowercase__ : str = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(__lowerCAmelCase ) , trainable=__lowerCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(__lowerCAmelCase ) != len(self._gradients ): raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__lowerCAmelCase )}""" ) for accum_gradient, gradient in zip(self._gradients , __lowerCAmelCase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(__lowerCAmelCase ) self._accum_steps.assign_add(1 ) def _lowerCAmelCase( self ) -> Optional[Any]: if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(__lowerCAmelCase ) )
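The factory function above appears to be transformers' `create_optimizer` from `optimization_tf`. A hedged usage sketch under that assumption; the hyperparameter values are illustrative:

# Hedged sketch assuming this module is transformers.optimization_tf.
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=3e-5,            # peak learning rate reached after warmup
    num_train_steps=10_000,  # total optimization steps for the decay schedule
    num_warmup_steps=1_000,  # handled by the WarmUp schedule defined above
    weight_decay_rate=0.01,  # > 0 selects AdamWeightDecay instead of plain Adam
)
# `model` would be an already-built tf.keras model (assumption):
# model.compile(optimizer=optimizer)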
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : int = args.log_outputs UpperCAmelCase : List[Any] = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric UpperCAmelCase : Dict = load_metric("""wer""" ) UpperCAmelCase : Optional[Any] = load_metric("""cer""" ) # compute metrics UpperCAmelCase : str = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) UpperCAmelCase : int = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results UpperCAmelCase : int = F'''WER: {wer_result}\nCER: {cer_result}''' print(_lowercase ) with open(F'''{dataset_id}_eval_results.txt''' , """w""" ) as f: f.write(_lowercase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase : Dict = F'''log_{dataset_id}_predictions.txt''' UpperCAmelCase : Tuple = F'''log_{dataset_id}_targets.txt''' with open(_lowercase , """w""" ) as p, open(_lowercase , """w""" ) as t: # mapping function to write output def write_to_file(_lowercase , _lowercase ): p.write(F'''{i}''' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(F'''{i}''' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(_lowercase , with_indices=_lowercase ) def __lowerCamelCase ( _lowercase ) -> str: UpperCAmelCase : str = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase : List[str] = re.sub(_lowercase , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase : int = ["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: UpperCAmelCase : List[Any] = """ """.join(text.split(_lowercase ) ) return text def __lowerCamelCase ( _lowercase ) -> Tuple: # load dataset UpperCAmelCase : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_lowercase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase : List[Any] = feature_extractor.sampling_rate # resample audio UpperCAmelCase : Tuple = dataset.cast_column("""audio""" , Audio(sampling_rate=_lowercase ) ) # load eval pipeline if args.device is None: UpperCAmelCase : Tuple = 0 if torch.cuda.is_available() else -1 UpperCAmelCase : Union[str, Any] = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_lowercase ): UpperCAmelCase : int = asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase : Optional[Any] = prediction["""text"""] UpperCAmelCase : Union[str, Any] = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples UpperCAmelCase : List[str] = dataset.map(_lowercase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_lowercase , _lowercase ) if __name__ == "__main__": a : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) a : Any = parser.parse_args() main(args)
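The normalization step is easy to check in isolation; a self-contained sketch mirroring `normalize_text` above:

# Self-contained mirror of normalize_text above: strip ignored punctuation,
# lowercase, and collapse newlines/double spaces to single spaces.
import re

def normalize_text(text: str) -> str:
    chars_to_ignore_regex = r'[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    for token in ["\n\n", "\n", "  ", "  "]:
        text = " ".join(text.split(token))
    return text

assert normalize_text("Hello, World!\nNew line") == "hello world new line"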
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
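The point of the `_LazyModule` indirection is that nothing heavy is imported until an attribute is first touched; a small illustration (assuming transformers is installed with torch):

# Illustration of the lazy import: the module object is a _LazyModule proxy.
import transformers.models.git as git  # cheap: modeling_git and torch not loaded yet

model_cls = git.GitForCausalLM  # first attribute access imports modeling_git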
def is_arithmetic_series(series: list) -> bool:
    """Return True if the list forms an arithmetic series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
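Quick checks for the two helpers:

# Spot checks for the helpers above.
assert is_arithmetic_series([2, 4, 6])
assert not is_arithmetic_series([2, 4, 7])
assert arithmetic_mean([2, 4, 6]) == 4.0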
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCamelCase = logging.get_logger(__name__) __lowerCamelCase = """▁""" __lowerCamelCase = {"""vocab_file""": """prophetnet.tokenizer"""} __lowerCamelCase = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } __lowerCamelCase = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } __lowerCamelCase = { """microsoft/xprophetnet-large-wiki100-cased""": 5_12, } def UpperCamelCase ( __lowerCamelCase : Dict ): snake_case : Dict = collections.OrderedDict() with open(__lowerCamelCase , "r" , encoding="utf-8" ) as reader: snake_case : Any = reader.readlines() for index, token in enumerate(__lowerCamelCase ): snake_case : List[Any] = token.rstrip("\n" ) snake_case : int = index return vocab class UpperCAmelCase ( A_ ): A__ : Tuple = VOCAB_FILES_NAMES A__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ : int = ["input_ids", "attention_mask"] def __init__(self : Any , snake_case__ : Dict , snake_case__ : List[Any]="[SEP]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : List[Any]="[UNK]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : List[Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[str] , ) -> None: '''simple docstring''' snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(snake_case__ ) ) snake_case : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab snake_case : List[Any] = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10 ): snake_case : Dict = f"""[unused{i}]""" snake_case : List[str] = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab snake_case : Dict = 12 snake_case : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(snake_case__ ) def __getstate__(self : str ) -> Union[str, Any]: '''simple docstring''' snake_case : str = self.__dict__.copy() snake_case : Tuple = None return state def __setstate__(self : str , snake_case__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' snake_case : Union[str, Any] = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): snake_case : Dict = {} snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return ([0] * len(snake_case__ )) + [1] return ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1] def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' snake_case : List[str] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE (self : Any ) -> int: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def _SCREAMING_SNAKE_CASE (self : int ) -> Any: '''simple docstring''' snake_case : List[str] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE (self : Optional[int] , snake_case__ : str ) -> str: '''simple docstring''' return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : Optional[int] ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case : Optional[Any] = self.sp_model.PieceToId(snake_case__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Optional[int] ) -> int: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE (self : Union[str, Any] , snake_case__ : Dict ) -> List[Any]: '''simple docstring''' snake_case : Dict = "".join(snake_case__ ).replace(snake_case__ , " " ).strip() return out_string def _SCREAMING_SNAKE_CASE (self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ) -> 
Tuple[str]: '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case : Dict = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , "wb" ) as fi: snake_case : Tuple = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE (self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] snake_case : str = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
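This appears to be transformers' `XLMProphetNetTokenizer`; a hedged loading sketch (requires sentencepiece, as the import guard above warns):

# Hedged sketch assuming this is transformers' XLMProphetNetTokenizer.
from transformers import XLMProphetNetTokenizer

tok = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
ids = tok("Hello world").input_ids  # ends with the [SEP] id (2) per the fairseq map above
print(tok.convert_ids_to_tokens(ids))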
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class __UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[int], __A : Dict, __A : Optional[int]=7, __A : List[Any]=3, __A : Optional[Any]=1_8, __A : Union[str, Any]=3_0, __A : Any=4_0_0, __A : Any=True, __A : Tuple=None, __A : str=True, __A : List[str]=None, __A : Optional[Any]=True, __A : str=[0.5, 0.5, 0.5], __A : int=[0.5, 0.5, 0.5], ): UpperCAmelCase : List[Any] = size if size is not None else {'''shortest_edge''': 1_8} UpperCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8} UpperCAmelCase : Tuple = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Optional[Any] = num_channels UpperCAmelCase : Tuple = image_size UpperCAmelCase : Optional[int] = min_resolution UpperCAmelCase : Optional[Any] = max_resolution UpperCAmelCase : str = do_resize UpperCAmelCase : Optional[Any] = size UpperCAmelCase : Optional[Any] = do_center_crop UpperCAmelCase : Tuple = crop_size UpperCAmelCase : List[Any] = do_normalize UpperCAmelCase : str = image_mean UpperCAmelCase : List[Any] = image_std def __magic_name__ ( self : Union[str, Any] ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ): UpperCamelCase = LevitImageProcessor if is_vision_available() else None def __magic_name__ ( self : int ): UpperCAmelCase : int = LevitImageProcessingTester(self ) @property def __magic_name__ ( self : Optional[int] ): return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : Tuple ): UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__, '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase__, '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase__, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__, '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase__, '''do_center_crop''' ) ) self.assertTrue(hasattr(lowerCAmelCase__, '''size''' ) ) def __magic_name__ ( self : str ): UpperCAmelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''shortest_edge''': 1_8} ) self.assertEqual(image_processor.crop_size, {'''height''': 1_8, '''width''': 1_8} ) UpperCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 ) self.assertEqual(image_processor.size, {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size, {'''height''': 8_4, '''width''': 8_4} ) def __magic_name__ ( self : Optional[int] ): pass def __magic_name__ ( self : Dict ): UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, Image.Image ) # 
Test not batched input UpperCAmelCase : List[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched UpperCAmelCase : int = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def __magic_name__ ( self : List[Any] ): UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, np.ndarray ) # Test not batched input UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched UpperCAmelCase : List[Any] = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) def __magic_name__ ( self : Optional[int] ): UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, torch.Tensor ) # Test not batched input UpperCAmelCase : Tuple = image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), ) # Test batched UpperCAmelCase : List[str] = image_processing(lowerCAmelCase__, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ), )
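Outside the test harness the processor under test is used directly; a hedged sketch whose size values mirror the tester defaults above:

# Hedged sketch of direct LevitImageProcessor usage; sizes mirror the tester.
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.new("RGB", (30, 40))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])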
'''simple docstring'''

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A =get_tests_dir('''fixtures/test_sentencepiece.model''') if is_sentencepiece_available(): import sentencepiece as sp __A =5 __A =1_0 @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( snake_case_ , unittest.TestCase ): lowerCAmelCase__ = SpeechaTextTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: super().setUp() lowerCamelCase_ = sp.SentencePieceProcessor() spm_model.Load(lowercase ) lowerCamelCase_ = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowercase ) )] lowerCamelCase_ = dict(zip(lowercase , range(len(lowercase ) ) ) ) lowerCamelCase_ = Path(self.tmpdirname ) save_json(lowercase , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowercase , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowerCamelCase_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]: lowerCamelCase_ = "<pad>" lowerCamelCase_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> Any: lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(lowercase ) , 1001 ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1001 ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: lowerCamelCase_ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase ) , [289, 50, 14, 174, 386] , ) lowerCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." 
) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowerCamelCase_ = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual(lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ = tokenizer.convert_ids_to_tokens(lowercase ) self.assertListEqual( lowercase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: # fmt: off lowerCamelCase_ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): lowerCAmelCase__ = 'valhalla/s2t_mustc_multilinguial_medium' lowerCAmelCase__ = 'C\'est trop cool' lowerCAmelCase__ = 'Esto es genial' @classmethod def SCREAMING_SNAKE_CASE_( cls ) -> Optional[Any]: lowerCamelCase_ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: self.assertEqual(self.tokenizer.vocab_size , 10000 ) def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]: self.assertIn(lowercase , self.tokenizer.all_special_ids ) lowerCamelCase_ = [ES_CODE, 4, 1601, 47, 7647, 2] lowerCamelCase_ = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase ) lowerCamelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase ) self.assertEqual(lowercase , lowercase ) self.assertNotIn(self.tokenizer.eos_token , lowercase ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = "fr" lowerCamelCase_ = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , lowercase ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def SCREAMING_SNAKE_CASE_( self ) -> List[Any]: lowerCamelCase_ = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowerCamelCase_ = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
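The multilingual checkpoint exercised by the integration tests can be loaded directly; a hedged sketch (`tgt_lang` is my assumption for the attribute the obfuscated test sets):

# Hedged sketch using the checkpoint named in the test class above.
from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
tok.tgt_lang = "fr"  # assumption: prefixes encodings with the fr language code
print(tok("C'est trop cool").input_ids)  # first id: fr code; last id: eos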
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
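A hedged end-to-end sketch; the default checkpoint paths come from `load_vqgan` above, and the helper names follow the reconstruction:

# Hedged usage sketch; paths are the defaults assumed in load_vqgan above.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vqgan = load_vqgan(device)  # falls back to ./model_checkpoints/vqgan_only.{yaml,pt}
x = torch.randn(1, 3, 256, 256, device=device)  # stand-in for a preprocessed image batch
with torch.no_grad():
    x_rec = reconstruct_with_vqgan(x, vqgan)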
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline lowerCAmelCase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False , ) -> List[str]: '''simple docstring''' output_path.parent.mkdir(parents=__magic_name__ , exist_ok=__magic_name__ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( __magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , use_external_data_format=__magic_name__ , enable_onnx_checker=__magic_name__ , opset_version=__magic_name__ , ) else: export( __magic_name__ , __magic_name__ , f=output_path.as_posix() , input_names=__magic_name__ , output_names=__magic_name__ , dynamic_axes=__magic_name__ , do_constant_folding=__magic_name__ , opset_version=__magic_name__ , ) @torch.no_grad() def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ) -> Optional[Any]: '''simple docstring''' lowercase : str = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowercase : Union[str, Any] = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' ) else: lowercase : int = '''cpu''' lowercase : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=__magic_name__ ).to(__magic_name__ ) lowercase : Tuple = Path(__magic_name__ ) # TEXT ENCODER lowercase : List[Any] = pipeline.text_encoder.config.max_position_embeddings lowercase : Optional[Any] = pipeline.text_encoder.config.hidden_size lowercase : Dict = pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=__magic_name__ , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=__magic_name__ , dtype=torch.intaa )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=__magic_name__ , ) del pipeline.text_encoder # UNET lowercase : Tuple = pipeline.unet.config.in_channels lowercase : str = pipeline.unet.config.sample_size lowercase : str = output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ), torch.randn(2 ).to(device=__magic_name__ , dtype=__magic_name__ ), torch.randn(2 , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ), False, ) , output_path=__magic_name__ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, 
'''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=__magic_name__ , use_external_data_format=__magic_name__ , ) lowercase : List[str] = str(unet_path.absolute().as_posix() ) lowercase : Optional[int] = os.path.dirname(__magic_name__ ) lowercase : Optional[Any] = onnx.load(__magic_name__ ) # clean up existing tensor files shutil.rmtree(__magic_name__ ) os.mkdir(__magic_name__ ) # collate external tensor files into one onnx.save_model( __magic_name__ , __magic_name__ , save_as_external_data=__magic_name__ , all_tensors_to_one_file=__magic_name__ , location='''weights.pb''' , convert_attribute=__magic_name__ , ) del pipeline.unet # VAE ENCODER lowercase : Union[str, Any] = pipeline.vae lowercase : Tuple = vae_encoder.config.in_channels lowercase : Any = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowercase : int = lambda __magic_name__ , __magic_name__ : vae_encoder.encode(__magic_name__ , __magic_name__ )[0].sample() onnx_export( __magic_name__ , model_args=( torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__magic_name__ , ) # VAE DECODER lowercase : int = pipeline.vae lowercase : Union[str, Any] = vae_decoder.config.latent_channels lowercase : List[str] = vae_decoder.config.out_channels # forward only through the decoder part lowercase : Optional[Any] = vae_encoder.decode onnx_export( __magic_name__ , model_args=( torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=__magic_name__ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowercase : Dict = pipeline.safety_checker lowercase : Union[str, Any] = safety_checker.config.vision_config.num_channels lowercase : str = safety_checker.config.vision_config.image_size lowercase : Union[str, Any] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , __magic_name__ , __magic_name__ , __magic_name__ , ).to(device=__magic_name__ , dtype=__magic_name__ ), torch.randn(1 , __magic_name__ , __magic_name__ , __magic_name__ ).to(device=__magic_name__ , dtype=__magic_name__ ), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=__magic_name__ , ) del pipeline.safety_checker lowercase : Dict = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' ) lowercase : Dict = pipeline.feature_extractor else: lowercase : int = None lowercase : List[Any] = None lowercase : Dict = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , 
vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(__magic_name__ ) print('''ONNX pipeline saved to''' , __magic_name__ ) del pipeline del onnx_pipeline lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(__magic_name__ , provider='''CPUExecutionProvider''' ) print('''ONNX pipeline is loadable''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') lowerCAmelCase_ = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
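The script is driven by argparse; equivalently, the entry point can be called directly. A hedged sketch (the checkpoint id is an assumption, not prescribed by the script):

# Hedged: call the conversion entry point directly; equivalent to
#   python <this script> --model_path <repo-or-dir> --output_path ./sd_onnx --opset 14
convert_models("runwayml/stable-diffusion-v1-5", "./sd_onnx", opset=14, fp16=False)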
import os


def solution(filename: str = "input.txt") -> int:
    """Minimal path sum moving right, up, or down through the matrix
    (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [[int(element) for element in line.split(",")] for line in input_file.readlines()]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # step right from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downward moves (top to bottom)
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # relax upward moves (bottom to top)
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
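The same three-pass relaxation can be checked on an in-memory matrix; a small illustrative refactor:

# Illustration: the three-pass relaxation from solution() on an in-memory matrix.
def min_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    mps = [[row[0]] + [0] * (cols - 1) for row in matrix]
    for j in range(1, cols):
        for i in range(rows):  # step right from the previous column
            mps[i][j] = mps[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # relax downward moves
            mps[i][j] = min(mps[i][j], mps[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            mps[i][j] = min(mps[i][j], mps[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in mps)

assert min_path_sum([[1, 2], [5, 1]]) == 3  # enter at (0, 0), then move right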
'''simple docstring'''

import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
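Only candidates of the form k = 2**m * (2**m - 1) pass the perfectness test (that closed form is my reading of the check above); quick spot checks:

# Spot checks: k = 2**m * (2**m - 1) yields an integer exponent.
assert check_partition_perfect(2)      # m = 1
assert check_partition_perfect(12)     # m = 2
assert not check_partition_perfect(3)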
'''simple docstring''' import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __snake_case : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Tuple=13 , lowerCamelCase : Dict=30 , lowerCamelCase : Dict=2 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : str=32 , lowerCamelCase : Any=5 , lowerCamelCase : int=4 , lowerCamelCase : List[str]=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Optional[Any]=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : str=10 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : List[str]=3 , lowerCamelCase : Union[str, Any]=0.6 , lowerCamelCase : List[Any]=None , ) -> Optional[int]: lowerCAmelCase_ : Optional[Any] = parent lowerCAmelCase_ : Optional[int] = batch_size lowerCAmelCase_ : int = image_size lowerCAmelCase_ : List[Any] = patch_size lowerCAmelCase_ : int = num_channels lowerCAmelCase_ : Any = is_training lowerCAmelCase_ : Tuple = use_labels lowerCAmelCase_ : Optional[Any] = hidden_size lowerCAmelCase_ : List[Any] = num_hidden_layers lowerCAmelCase_ : Optional[Any] = num_attention_heads lowerCAmelCase_ : Dict = intermediate_size lowerCAmelCase_ : Union[str, Any] = hidden_act lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase_ : Any = attention_probs_dropout_prob lowerCAmelCase_ : List[Any] = type_sequence_label_size lowerCAmelCase_ : Dict = initializer_range lowerCAmelCase_ : List[str] = mask_ratio lowerCAmelCase_ : Tuple = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCAmelCase_ : Union[str, Any] = (image_size // patch_size) ** 2 lowerCAmelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def __lowercase ( self : Optional[int] ) -> str: lowerCAmelCase_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ : Optional[int] = None if self.use_labels: lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase_ : str = self.get_config() return config, pixel_values, labels def __lowercase ( self : Optional[int] ) -> Optional[int]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def __lowercase ( self : Any , 
lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ) -> Tuple: lowerCAmelCase_ : Tuple = ViTMAEModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCAmelCase_ : Dict = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] ) -> Dict: lowerCAmelCase_ : Tuple = ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCAmelCase_ : List[str] = model(lowerCamelCase ) lowerCAmelCase_ : int = (self.image_size // self.patch_size) ** 2 lowerCAmelCase_ : int = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCAmelCase_ : List[Any] = 1 lowerCAmelCase_ : List[str] = ViTMAEForPreTraining(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase_ : Tuple = model(lowerCamelCase ) lowerCAmelCase_ : List[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def __lowercase ( self : Optional[int] ) -> str: lowerCAmelCase_ : Any = self.prepare_config_and_inputs() lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = config_and_inputs lowerCAmelCase_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase): """simple docstring""" lowercase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () lowercase = {'feature-extraction': ViTMAEModel} if is_torch_available() else {} lowercase = False lowercase = False lowercase = False lowercase = False def __lowercase ( self : Optional[Any] ) -> List[Any]: lowerCAmelCase_ : Optional[int] = ViTMAEModelTester(self ) lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 ) def __lowercase ( self : Dict ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def __lowercase ( self : Optional[int] ) -> Optional[int]: pass def __lowercase ( self : List[str] ) -> Tuple: lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : List[str] = model_class(lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) ) def __lowercase ( self : Optional[Any] ) -> Any: lowerCAmelCase_, lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Optional[int] = model_class(lowerCamelCase ) lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ : Any = [*signature.parameters.keys()] lowerCAmelCase_ : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def __lowercase ( self : Tuple ) -> str: lowerCAmelCase_ : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def __lowercase ( self : Optional[int] ) -> str: lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase ) def __lowercase ( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] ) -> str: # make masks reproducible np.random.seed(2 ) lowerCAmelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) lowerCAmelCase_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCAmelCase_ : int = pt_noise super().check_pt_tf_models(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def __lowercase ( self : int ) -> Dict: lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ : Optional[int] = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCAmelCase_ : Any = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) lowerCAmelCase_ : Any = outputs[0].cpu().numpy() lowerCAmelCase_ : List[str] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase ) lowerCAmelCase_ : int = model_class.from_pretrained(lowerCamelCase ) model.to(lowerCamelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): lowerCAmelCase_ : str = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) # Make sure we don't have nans lowerCAmelCase_ : Optional[Any] = after_outputs[0].cpu().numpy() lowerCAmelCase_ : str = 0 lowerCAmelCase_ : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase , 1E-5 ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def __lowercase ( self : Optional[int] ) -> List[Any]: pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def __lowercase ( self : Union[str, Any] ) -> str: pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def __lowercase ( self : Optional[Any] ) -> Union[str, Any]: pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""" ) def __lowercase ( self : Tuple ) -> Optional[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowercase ( self : List[Any] ) -> str: pass @slow def __lowercase ( self : List[str] ) -> List[Any]: for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase_ : List[Any] = ViTMAEModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __snake_case ( unittest.TestCase): """simple docstring""" @cached_property def __lowercase ( self : Union[str, Any] ) -> Union[str, Any]: return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def __lowercase ( self : int ) -> List[Any]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCAmelCase_ : Dict = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCamelCase ) lowerCAmelCase_ : Union[str, Any] = self.default_image_processor lowerCAmelCase_ : Union[str, Any] = prepare_img() lowerCAmelCase_ : Dict = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCAmelCase_ : Optional[int] = ViTMAEConfig() lowerCAmelCase_ : Optional[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCAmelCase_ : Optional[int] = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): lowerCAmelCase_ : str = model(**lowerCamelCase , noise=torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) ) # verify the logits lowerCAmelCase_ : str = torch.Size((1, 1_96, 7_68) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) lowerCAmelCase_ : str = torch.tensor( [[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase ) , atol=1E-4 ) )
89
1
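A quick sanity check for the Project Euler partition helper above (a minimal self-contained sketch; `check_partition_perfect` is the cleaned-up name used in the rewrite, not necessarily the upstream one):

import math

def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)

# k = 3 yields the candidate (3**2 - 1) / 4 = 2, and sqrt(4*2 + 1)/2 + 1/2 = 2 = 2**1,
# so 2 counts as a "perfect" partition value.
assert (3**2 - 1) / 4 == 2
assert check_partition_perfect(2)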
import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
55
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def UpperCAmelCase_ ( __UpperCAmelCase : bytes , __UpperCAmelCase : int ) -> np.array: SCREAMING_SNAKE_CASE_ = f"{sampling_rate}" SCREAMING_SNAKE_CASE_ = '1' SCREAMING_SNAKE_CASE_ = 'f32le' SCREAMING_SNAKE_CASE_ = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(__UpperCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: SCREAMING_SNAKE_CASE_ = ffmpeg_process.communicate(__UpperCAmelCase ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error SCREAMING_SNAKE_CASE_ = output_stream[0] SCREAMING_SNAKE_CASE_ = np.frombuffer(__UpperCAmelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : str = "f32le" , ) -> int: SCREAMING_SNAKE_CASE_ = f"{sampling_rate}" SCREAMING_SNAKE_CASE_ = '1' if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE_ = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE_ = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`" ) SCREAMING_SNAKE_CASE_ = platform.system() if system == "Linux": SCREAMING_SNAKE_CASE_ = 'alsa' SCREAMING_SNAKE_CASE_ = 'default' elif system == "Darwin": SCREAMING_SNAKE_CASE_ = 'avfoundation' SCREAMING_SNAKE_CASE_ = ':0' elif system == "Windows": SCREAMING_SNAKE_CASE_ = 'dshow' SCREAMING_SNAKE_CASE_ = 'default' SCREAMING_SNAKE_CASE_ = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample SCREAMING_SNAKE_CASE_ = _ffmpeg_stream(__UpperCAmelCase , __UpperCAmelCase ) for item in iterator: yield item def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : float , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Union[Tuple[float, float], float]] = None , __UpperCAmelCase : str = "f32le" , ) -> Tuple: if stream_chunk_s is not None: SCREAMING_SNAKE_CASE_ = stream_chunk_s else: SCREAMING_SNAKE_CASE_ = chunk_length_s SCREAMING_SNAKE_CASE_ = ffmpeg_microphone(__UpperCAmelCase , __UpperCAmelCase , format_for_conversion=__UpperCAmelCase ) if format_for_conversion == "s16le": SCREAMING_SNAKE_CASE_ = np.intaa SCREAMING_SNAKE_CASE_ = 2 elif format_for_conversion == "f32le": SCREAMING_SNAKE_CASE_ = np.floataa SCREAMING_SNAKE_CASE_ = 4 else: raise ValueError(f"Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`" ) if stride_length_s is None: SCREAMING_SNAKE_CASE_ = chunk_length_s / 6 SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__UpperCAmelCase , (int, float) ): SCREAMING_SNAKE_CASE_ = [stride_length_s, stride_length_s] SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample SCREAMING_SNAKE_CASE_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample SCREAMING_SNAKE_CASE_ = datetime.datetime.now() SCREAMING_SNAKE_CASE_ = datetime.timedelta(seconds=__UpperCAmelCase ) for item in chunk_bytes_iter(__UpperCAmelCase , __UpperCAmelCase , stride=(stride_left, stride_right) , stream=__UpperCAmelCase ): # Put everything back in numpy scale SCREAMING_SNAKE_CASE_ = np.frombuffer(item['raw'] , dtype=__UpperCAmelCase ) SCREAMING_SNAKE_CASE_ = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) SCREAMING_SNAKE_CASE_ = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple[int, int] , __UpperCAmelCase : bool = False ) -> Union[str, Any]: SCREAMING_SNAKE_CASE_ = b'' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" ) SCREAMING_SNAKE_CASE_ = 0 for raw in iterator: acc += raw if stream and len(__UpperCAmelCase ) < chunk_len: SCREAMING_SNAKE_CASE_ = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__UpperCAmelCase ) >= chunk_len: # We are flushing the accumulator SCREAMING_SNAKE_CASE_ = (_stride_left, stride_right) SCREAMING_SNAKE_CASE_ = {'raw': acc[:chunk_len], 'stride': stride} if stream: SCREAMING_SNAKE_CASE_ = False yield item SCREAMING_SNAKE_CASE_ = stride_left SCREAMING_SNAKE_CASE_ = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__UpperCAmelCase ) > stride_left: SCREAMING_SNAKE_CASE_ = {'raw': acc, 'stride': (_stride_left, 0)} if stream: SCREAMING_SNAKE_CASE_ = False yield item def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : int ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = 2**24 # 16Mo try: with subprocess.Popen(__UpperCAmelCase , stdout=subprocess.PIPE , bufsize=__UpperCAmelCase ) as ffmpeg_process: while True: SCREAMING_SNAKE_CASE_ = ffmpeg_process.stdout.read(__UpperCAmelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
225
0
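The chunking helper in the ffmpeg streaming code above slides a fixed-size window with left/right strides over an accumulating byte buffer. A heavily simplified standalone sketch of that striding logic (names are mine, and the stream/"partial" bookkeeping of the original is omitted):

def chunk_with_stride(chunks, chunk_len, stride_left, stride_right):
    """Yield (bytes, (left, right)) windows; adjacent windows overlap by the strides."""
    acc = b""
    left = 0  # the first window has no left context
    for raw in chunks:
        acc += raw
        while len(acc) >= chunk_len:
            yield acc[:chunk_len], (left, stride_right)
            left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > left:  # flush the tail with no right context
        yield acc, (left, 0)

# Example: 4-byte windows, 1-byte stride on each side.
for window, stride in chunk_with_stride([b"abcdefgh"], 4, 1, 1):
    print(window, stride)  # b'abcd' (0, 1), b'cdef' (1, 1), b'efgh' (1, 1), b'gh' (1, 0)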
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar _SCREAMING_SNAKE_CASE : Tuple = TypeVar("T") def UpperCamelCase_( snake_case : int ): '''simple docstring''' return (position - 1) // 2 def UpperCamelCase_( snake_case : int ): '''simple docstring''' return (2 * position) + 1 def UpperCamelCase_( snake_case : int ): '''simple docstring''' return (2 * position) + 2 class _snake_case ( Generic[T] ): def __init__( self ) -> None: '''simple docstring''' snake_case_ = [] snake_case_ = {} snake_case_ = 0 def __len__( self ) -> int: '''simple docstring''' return self.elements def __repr__( self ) -> str: '''simple docstring''' return str(self.heap ) def lowerCAmelCase__ ( self ) -> bool: '''simple docstring''' return self.elements == 0 def lowerCAmelCase__ ( self , a__ , a__ ) -> None: '''simple docstring''' self.heap.append((elem, weight) ) snake_case_ = self.elements self.elements += 1 self._bubble_up(a__ ) def lowerCAmelCase__ ( self ) -> T: '''simple docstring''' if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) snake_case_ , snake_case_ = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: snake_case_ , snake_case_ = self.heap[0] self._bubble_down(a__ ) return elem def lowerCAmelCase__ ( self , a__ , a__ ) -> None: '''simple docstring''' snake_case_ = self.position_map[elem] snake_case_ = (elem, weight) if position > 0: snake_case_ = get_parent_position(a__ ) snake_case_ , snake_case_ = self.heap[parent_position] if parent_weight > weight: self._bubble_up(a__ ) else: self._bubble_down(a__ ) else: self._bubble_down(a__ ) def lowerCAmelCase__ ( self , a__ ) -> None: '''simple docstring''' snake_case_ = self.position_map[elem] if curr_pos == 0: return None snake_case_ = get_parent_position(a__ ) snake_case_ , snake_case_ = self.heap[curr_pos] snake_case_ , snake_case_ = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(a__ , a__ ) return self._bubble_up(a__ ) return None def lowerCAmelCase__ ( self , a__ ) -> None: '''simple docstring''' snake_case_ = self.position_map[elem] snake_case_ , snake_case_ = self.heap[curr_pos] snake_case_ = get_child_left_position(a__ ) snake_case_ = get_child_right_position(a__ ) if child_left_position < self.elements and child_right_position < self.elements: snake_case_ , snake_case_ = self.heap[child_left_position] snake_case_ , snake_case_ = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(a__ , a__ ) return self._bubble_down(a__ ) if child_left_position < self.elements: snake_case_ , snake_case_ = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(a__ , a__ ) return self._bubble_down(a__ ) else: return None if child_right_position < self.elements: snake_case_ , snake_case_ = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(a__ , a__ ) return self._bubble_down(a__ ) return None def lowerCAmelCase__ ( self , a__ , a__ ) -> None: '''simple docstring''' snake_case_ = self.heap[nodea_pos][0] snake_case_ = self.heap[nodea_pos][0] snake_case_ , snake_case_ = ( self.heap[nodea_pos], self.heap[nodea_pos], ) snake_case_ = nodea_pos snake_case_ = nodea_pos class _snake_case ( Generic[T] ): def __init__( self ) -> None: '''simple docstring''' snake_case_ = {} snake_case_ = 0 def __repr__( self ) -> str: '''simple docstring''' return str(self.connections ) def __len__( self ) -> int: '''simple docstring''' 
return self.nodes def lowerCAmelCase__ ( self , a__ ) -> None: '''simple docstring''' if node not in self.connections: snake_case_ = {} self.nodes += 1 def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> None: '''simple docstring''' self.add_node(a__ ) self.add_node(a__ ) snake_case_ = weight snake_case_ = weight def UpperCamelCase_( snake_case : GraphUndirectedWeighted[T] , ): '''simple docstring''' snake_case_ = {node: maxsize for node in graph.connections} snake_case_ = {node: None for node in graph.connections} snake_case_ = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(snake_case , snake_case ) if priority_queue.is_empty(): return dist, parent # initialization snake_case_ = priority_queue.extract_min() snake_case_ = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: snake_case_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(snake_case , dist[neighbour] ) snake_case_ = node # running prim's algorithm while not priority_queue.is_empty(): snake_case_ = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: snake_case_ = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(snake_case , dist[neighbour] ) snake_case_ = node return dist, parent
92
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
92
1
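For reference, here is a compact standalone version of the same Prim's-algorithm idea using heapq instead of the hand-rolled priority queue above (a sketch with my own names, not code from this file):

import heapq

def prim_mst_weight(graph: dict, start) -> int:
    """graph maps node -> {neighbour: weight}; returns the total MST weight."""
    visited = {start}
    edges = [(weight, start, neighbour) for neighbour, weight in graph[start].items()]
    heapq.heapify(edges)
    total = 0
    while edges and len(visited) < len(graph):
        weight, _, node = heapq.heappop(edges)
        if node in visited:
            continue  # stale entry: node was reached by a cheaper edge already
        visited.add(node)
        total += weight
        for neighbour, w in graph[node].items():
            if neighbour not in visited:
                heapq.heappush(edges, (w, node, neighbour))
    return total

print(prim_mst_weight({"a": {"b": 1, "c": 3}, "b": {"a": 1, "c": 1}, "c": {"a": 3, "b": 1}}, "a"))  # 2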
'''simple docstring''' import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): a__ : Optional[int] = ["""input_features""", """is_longer"""] def __init__(self : Tuple , __a : Optional[int]=64 , __a : Union[str, Any]=48000 , __a : str=480 , __a : Optional[int]=10 , __a : List[Any]=1024 , __a : Optional[Any]=0.0 , __a : Tuple=False , __a : float = 0 , __a : float = 14000 , __a : int = None , __a : str = "fusion" , __a : str = "repeatpad" , **__a : Any , ): super().__init__( feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , ) UpperCAmelCase_ = top_db UpperCAmelCase_ = truncation UpperCAmelCase_ = padding UpperCAmelCase_ = fft_window_size UpperCAmelCase_ = (fft_window_size >> 1) + 1 UpperCAmelCase_ = hop_length UpperCAmelCase_ = max_length_s UpperCAmelCase_ = max_length_s * sampling_rate UpperCAmelCase_ = sampling_rate UpperCAmelCase_ = frequency_min UpperCAmelCase_ = frequency_max UpperCAmelCase_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm=__a , mel_scale="htk" , ) UpperCAmelCase_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__a , min_frequency=__a , max_frequency=__a , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , ) def _lowercase (self : List[str] ): UpperCAmelCase_ = copy.deepcopy(self.__dict__ ) UpperCAmelCase_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _lowercase (self : str , __a : np.array , __a : Optional[np.array] = None ): UpperCAmelCase_ = spectrogram( __a , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__a , log_mel="dB" , ) return log_mel_spectrogram.T def _lowercase (self : List[Any] , __a : Dict , __a : Optional[Any] , __a : Tuple ): UpperCAmelCase_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk UpperCAmelCase_ = [0] # randomly choose index for each part UpperCAmelCase_ = np.random.choice(ranges[0] ) UpperCAmelCase_ = np.random.choice(ranges[1] ) UpperCAmelCase_ = np.random.choice(ranges[2] ) UpperCAmelCase_ = mel[idx_front : idx_front + chunk_frames, :] UpperCAmelCase_ = mel[idx_middle : idx_middle + chunk_frames, :] UpperCAmelCase_ = mel[idx_back : idx_back + chunk_frames, :] UpperCAmelCase_ = torch.tensor(mel[None, None, :] ) UpperCAmelCase_ = torch.nn.functional.interpolate( __a , size=[chunk_frames, 64] , mode="bilinear" , align_corners=__a ) UpperCAmelCase_ = mel_shrink[0][0].numpy() UpperCAmelCase_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _lowercase (self : Optional[Any] , __a : np.array , __a : Any , __a : List[str] , __a : Optional[Any] ): if waveform.shape[0] > max_length: if truncation == "rand_trunc": 
UpperCAmelCase_ = True # random crop to max_length (for compatibility) -> this should be handled by self.pad UpperCAmelCase_ = len(__a ) - max_length UpperCAmelCase_ = np.random.randint(0 , overflow + 1 ) UpperCAmelCase_ = waveform[idx : idx + max_length] UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :] elif truncation == "fusion": UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters ) UpperCAmelCase_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed UpperCAmelCase_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. UpperCAmelCase_ = np.stack([mel, mel, mel, mel] , axis=0 ) UpperCAmelCase_ = False else: UpperCAmelCase_ = self._random_mel_fusion(__a , __a , __a ) UpperCAmelCase_ = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: UpperCAmelCase_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": UpperCAmelCase_ = int(max_length / len(__a ) ) UpperCAmelCase_ = np.stack(np.tile(__a , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": UpperCAmelCase_ = int(max_length / len(__a ) ) UpperCAmelCase_ = np.stack(np.tile(__a , __a ) ) UpperCAmelCase_ = np.pad(__a , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 ) if truncation == "fusion": UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters ) UpperCAmelCase_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: UpperCAmelCase_ = self._np_extract_fbank_features(__a , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__(self : Any , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : str = None , __a : Optional[str] = None , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , **__a : int , ): UpperCAmelCase_ = truncation if truncation is not None else self.truncation UpperCAmelCase_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) UpperCAmelCase_ = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) UpperCAmelCase_ = is_batched_numpy or ( isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(__a , np.ndarray ): UpperCAmelCase_ = np.asarray(__a , dtype=np.floataa ) elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): UpperCAmelCase_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: UpperCAmelCase_ = [np.asarray(__a )] # convert to mel spectrogram, truncate and pad if needed. UpperCAmelCase_ = [ self._get_input_mel(__a , max_length if max_length else self.nb_max_samples , __a , __a ) for waveform in raw_speech ] UpperCAmelCase_ = [] UpperCAmelCase_ = [] for mel, longer in padded_inputs: input_mel.append(__a ) is_longer.append(__a ) if truncation == "fusion" and sum(__a ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer UpperCAmelCase_ = np.random.randint(0 , len(__a ) ) UpperCAmelCase_ = True if isinstance(input_mel[0] , __a ): UpperCAmelCase_ = [np.asarray(__a , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool UpperCAmelCase_ = [[longer] for longer in is_longer] UpperCAmelCase_ = {"input_features": input_mel, "is_longer": is_longer} UpperCAmelCase_ = BatchFeature(__a ) if return_tensors is not None: UpperCAmelCase_ = input_features.convert_to_tensors(__a ) return input_features
1
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = 
(FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
1
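The CLAP feature extractor above pads short clips by tiling the waveform ("repeat"/"repeatpad") before the usual zero-padding, and truncates or "fuses" long ones. A minimal numpy sketch of the "repeatpad" branch (an assumed simplification of the logic above, not the exact library code):

import numpy as np

def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    """Tile the waveform as many whole times as fits, then zero-pad to max_length."""
    n_repeat = max_length // len(waveform)
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - len(tiled)), mode="constant", constant_values=0)

print(repeat_pad(np.array([1.0, 2.0, 3.0]), 8))  # [1. 2. 3. 1. 2. 3. 0. 0.]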
"""simple docstring""" from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class _UpperCAmelCase ( _a ): a__ : str = CustomTokenizer pass
368
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : List[Any] = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : int = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Union[str, Any] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
86
0
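The MVP __init__ above follows the transformers lazy-import pattern: public names are declared in _import_structure and only resolved on first attribute access. A stripped-down sketch of such an __init__.py ("my_module" and "MyClass" are placeholders, not real modules):

from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"my_module": ["MyClass"]}

if TYPE_CHECKING:
    from .my_module import MyClass  # static type checkers see the real import
else:
    import sys

    # Nothing is imported here; accessing `package.MyClass` triggers the import lazily.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)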
def capitalize_each_alpha(text: str) -> list:
    """Return every variant of `text` with exactly one alphabetic character upper-cased."""
    return [
        text[:index] + text[index].upper() + text[index + 1 :]
        for index in range(len(text))
        if text[index].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
103
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
147
0
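A short usage sketch for the activation lookup above (assuming the cleaned-up name `get_activation` from the rewrite):

import torch

act = get_activation("silu")          # -> nn.SiLU()
print(act(torch.tensor([0.0, 1.0])))  # tensor([0.0000, 0.7311])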
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = SMALL_MODEL_IDENTIFIER _lowerCAmelCase : Optional[int] = 'pt' _lowerCAmelCase : Tuple = 'tf' def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case__ ) model_tf.save_pretrained(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Tuple = 'mock_framework' # Framework provided - return whatever the user provides _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Dict = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : int = FeaturesManager.determine_framework(snake_case__ , snake_case__ ) self.assertEqual(snake_case__ , snake_case__ ) def a ( self ): '''simple docstring''' with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(snake_case__ ) _lowerCAmelCase : Tuple = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(snake_case__ ) _lowerCAmelCase : Optional[int] = FeaturesManager.determine_framework(snake_case__ ) self.assertEqual(snake_case__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(snake_case__ ): _lowerCAmelCase : str = FeaturesManager.determine_framework(snake_case__ ) def a ( self ): '''simple docstring''' _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow _lowerCAmelCase : Any = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_tf ) # Both in environment -> use PyTorch _lowerCAmelCase : int = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[int] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): _lowerCAmelCase : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(snake_case__ , self.framework_pt ) # Both not in 
environment -> raise error _lowerCAmelCase : str = MagicMock(return_value=snake_case__ ) _lowerCAmelCase : Optional[Any] = MagicMock(return_value=snake_case__ ) with patch('transformers.onnx.features.is_tf_available' , snake_case__ ), patch( 'transformers.onnx.features.is_torch_available' , snake_case__ ): with self.assertRaises(snake_case__ ): _lowerCAmelCase : Any = FeaturesManager.determine_framework(self.test_model )
25
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list[str]) -> int:
    """Evaluate an integer expression given in postfix (reverse Polish) notation."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
1
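A worked example for the postfix evaluator above: "2 1 + 3 *" computes (2 + 1) * 3 (using the cleaned-up name `evaluate_postfix` from the rewrite).

print(evaluate_postfix(["2", "1", "+", "3", "*"]))   # 9
print(evaluate_postfix(["4", "13", "5", "/", "+"]))  # 4 + (13 // 5) = 6
print(evaluate_postfix([]))                          # 0 (empty expression)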
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class _A : UpperCamelCase__ : Dict = BlenderbotSmallConfig UpperCamelCase__ : Tuple = {} UpperCamelCase__ : str = '''gelu''' def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Dict=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ): '''simple docstring''' __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_labels __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = eos_token_id __a = pad_token_id __a = bos_token_id def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __a = tf.concat([input_ids, eos_tensor] , axis=1) __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __a = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __a = prepare_blenderbot_small_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) return config, inputs_dict def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any): '''simple docstring''' __a = TFBlenderbotSmallModel(config=__SCREAMING_SNAKE_CASE).get_decoder() __a = inputs_dict['''input_ids'''] __a = input_ids[:1, :] __a = inputs_dict['''attention_mask'''][:1, :] __a = inputs_dict['''head_mask'''] __a = 1 # first forward pass __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE) __a , __a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size) __a = 
tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and __a = tf.concat([input_ids, next_tokens] , axis=-1) __a = tf.concat([attention_mask, next_attn_mask] , axis=-1) __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0] __a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice __a = int(ids_tensor((1,) , output_from_past.shape[-1])) __a = output_from_no_past[:, -3:, random_slice_idx] __a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , rtol=1E-3) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): if attention_mask is None: __a = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) UpperCamelCase__ : Union[str, Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () UpperCamelCase__ : str = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) UpperCamelCase__ : Optional[int] = True UpperCamelCase__ : int = False UpperCamelCase__ : str = False def _lowerCamelCase ( self : int): '''simple docstring''' __a = TFBlenderbotSmallModelTester(self) __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' self.config_tester.run_common_tests() def _lowerCamelCase ( self : List[str]): '''simple docstring''' __a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE) @require_tokenizers @require_tf class _A ( unittest.TestCase ): UpperCamelCase__ : Union[str, Any] = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] UpperCamelCase__ : str = '''facebook/blenderbot_small-90M''' @cached_property def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''') @cached_property def _lowerCamelCase ( self : Optional[int]): '''simple docstring''' __a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model @slow def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = self.tokenizer(self.src_text , return_tensors='''tf''') __a = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__SCREAMING_SNAKE_CASE , ) __a = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__SCREAMING_SNAKE_CASE)[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
49
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
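The AST config above determines how many spectrogram patches the encoder sees: a patch_size x patch_size window slides over a (max_length x num_mel_bins) spectrogram with the given time and frequency strides. A sketch of that arithmetic (my reading of the model's embedding code, not taken from this file):

def ast_num_patches(max_length=1024, num_mel_bins=128, patch_size=16, time_stride=10, frequency_stride=10):
    time_steps = (max_length - patch_size) // time_stride + 1       # windows along the time axis
    freq_steps = (num_mel_bins - patch_size) // frequency_stride + 1  # windows along the mel axis
    return time_steps * freq_steps

print(ast_num_patches())  # 101 * 12 = 1212 patches with the defaults above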
"""simple docstring""" import baseaa def A ( snake_case :str ) -> bytes: return baseaa.aaaencode(string.encode('utf-8' ) ) def A ( snake_case :bytes ) -> str: return baseaa.aaadecode(snake_case ).decode('utf-8' ) if __name__ == "__main__": import doctest doctest.testmod()
263
"""simple docstring""" import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : int = { "artists_file": "artists.json", "lyrics_file": "lyrics.json", "genres_file": "genres.json", } UpperCamelCase : Optional[Any] = { "artists_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json", }, "genres_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json", }, "lyrics_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json", }, } UpperCamelCase : Any = { "jukebox": 5_1_2, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_LYRIC_TOKENS_SIZES lowercase = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=["v3", "v2", "v2"] , __UpperCAmelCase=512 , __UpperCAmelCase=5 , __UpperCAmelCase="<|endoftext|>" , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token super().__init__( unk_token=__UpperCAmelCase , n_genres=__UpperCAmelCase , version=__UpperCAmelCase , max_n_lyric_tokens=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = version __UpperCamelCase = max_n_lyric_tokens __UpperCamelCase = n_genres with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle: __UpperCamelCase = json.load(__UpperCAmelCase ) with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle: __UpperCamelCase = json.load(__UpperCAmelCase ) with open(__UpperCAmelCase , encoding='utf-8' ) as vocab_handle: __UpperCamelCase = json.load(__UpperCAmelCase ) __UpperCamelCase = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: __UpperCamelCase = oov.replace(R'\-\'' , R'\-+\'' ) __UpperCamelCase = regex.compile(__UpperCAmelCase ) __UpperCamelCase = {v: k for k, v in self.artists_encoder.items()} __UpperCamelCase = {v: k for k, v in self.genres_encoder.items()} __UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCAmelCase ( self ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCAmelCase ( self ): '''simple docstring''' return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [self.artists_encoder.get(__UpperCAmelCase , 0 ) for artist in list_artists] for genres in range(len(__UpperCAmelCase ) ): __UpperCamelCase = [self.genres_encoder.get(__UpperCAmelCase , 0 ) for genre in list_genres[genres]] __UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) __UpperCamelCase = [[self.lyrics_encoder.get(__UpperCAmelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return list(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.prepare_for_tokenization(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = self._tokenize(__UpperCAmelCase ) return artist, genre, lyrics def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": __UpperCamelCase = artists[idx].lower() __UpperCamelCase = [genres[idx].lower()] else: __UpperCamelCase = self._normalize(artists[idx] ) + '.v2' __UpperCamelCase = [ self._normalize(__UpperCAmelCase ) + '.v2' for genre in genres[idx].split('_' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": __UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' ) __UpperCamelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n' __UpperCamelCase = {vocab[index]: index + 1 for index in range(len(__UpperCAmelCase ) )} __UpperCamelCase = 0 __UpperCamelCase = len(__UpperCAmelCase ) + 1 __UpperCamelCase = self.vocab __UpperCamelCase = {v: k for k, v in self.vocab.items()} __UpperCamelCase = '' else: __UpperCamelCase = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' ) __UpperCamelCase = self._run_strip_accents(__UpperCAmelCase ) __UpperCamelCase = lyrics.replace('\\' , '\n' ) __UpperCamelCase = self.out_of_vocab.sub('' , __UpperCAmelCase ), [], [] return artists, genres, lyrics def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = unicodedata.normalize('NFD' , __UpperCAmelCase ) __UpperCamelCase = [] for char in text: __UpperCamelCase = unicodedata.category(__UpperCAmelCase ) if cat == "Mn": continue output.append(__UpperCAmelCase ) return "".join(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = ( [chr(__UpperCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )] + [chr(__UpperCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )] + [chr(__UpperCAmelCase ) for i in 
range(ord('0' ) , ord('9' ) + 1 )] + ['.'] ) __UpperCamelCase = frozenset(__UpperCAmelCase ) __UpperCamelCase = re.compile(R'_+' ) __UpperCamelCase = ''.join([c if c in accepted else '_' for c in text.lower()] ) __UpperCamelCase = pattern.sub('_' , __UpperCAmelCase ).strip('_' ) return text def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return " ".join(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = TensorType(__UpperCAmelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( 'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' ) import tensorflow as tf __UpperCamelCase = tf.constant __UpperCamelCase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' ) import torch __UpperCamelCase = torch.tensor __UpperCamelCase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' ) import jax.numpy as jnp # noqa: F811 __UpperCamelCase = jnp.array __UpperCamelCase = _is_jax else: __UpperCamelCase = np.asarray __UpperCamelCase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: __UpperCamelCase = [inputs] if not is_tensor(__UpperCAmelCase ): __UpperCamelCase = as_tensor(__UpperCAmelCase ) except: # noqa E722 raise ValueError( 'Unable to create tensor, you should probably activate truncation and/or padding ' 'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' 
) return inputs def __call__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase="pt" ): '''simple docstring''' __UpperCamelCase = [0, 0, 0] __UpperCamelCase = [artist] * len(self.version ) __UpperCamelCase = [genres] * len(self.version ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.tokenize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self._convert_token_to_id(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = [-INFINITY] * len(full_tokens[-1] ) __UpperCamelCase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCAmelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCAmelCase ) ) __UpperCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCAmelCase ) ) __UpperCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] ) with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCAmelCase ) ) return (artists_file, genres_file, lyrics_file) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.artists_decoder.get(__UpperCAmelCase ) __UpperCamelCase = [self.genres_decoder.get(__UpperCAmelCase ) for genre in genres_index] __UpperCamelCase = [self.lyrics_decoder.get(__UpperCAmelCase ) for character in lyric_index] return artist, genres, lyrics
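For orientation, a hedged usage sketch of this tokenizer via the public transformers API; the checkpoint name follows the transformers docs but is an assumption here, and the exact output layout may differ:

# a minimal sketch, assuming transformers' JukeboxTokenizer and the
# "openai/jukebox-1b-lyrics" checkpoint (checkpoint name is an assumption)
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
# __call__ takes (artist, genres, lyrics) and builds one input_ids tensor per model level
encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
print(len(encoding["input_ids"]))  # one entry per version handled by the tokenizer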
263
1
# Morse code translator. The exclamation mark is not in the ITU-R recommendation.
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.",
    "G": "--.", "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..",
    "M": "--", "N": "-.", "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.",
    "S": "...", "T": "-", "U": "..-", "V": "...-", "W": ".--", "X": "-..-",
    "Y": "-.--", "Z": "--..",
    "1": ".----", "2": "..---", "3": "...--", "4": "....-", "5": ".....",
    "6": "-....", "7": "--...", "8": "---..", "9": "----.", "0": "-----",
    "&": ".-...", "@": ".--.-.", ":": "---...", ",": "--..--", ".": ".-.-.-",
    "'": ".----.", '"': ".-..-.", "?": "..--..", "/": "-..-.", "=": "-...-",
    "+": ".-.-.", "-": "-....-", "(": "-.--.", ")": "-.--.-", "!": "-.-.--",
    " ": "/",
}

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Translate plain text to Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Translate Morse code back to plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
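A quick sanity check for the two helpers above (using the fixed encrypt/decrypt names):

# round-trip check for the Morse helpers above
assert encrypt("SOS") == "... --- ..."
assert decrypt("... --- ...") == "SOS"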
2
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    # optional parameters
    parser.add_argument(
        "--targets",
        default="DeprecationWarning,UserWarning,FutureWarning",
        type=list_str,
        help="Comma-separated list of target warning(s) which we want to extract.",
    )
    parser.add_argument(
        "--from_gh",
        action="store_true",
        help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
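For reference, a hedged sketch of driving the script above programmatically; the script filename, run id, and output directory are placeholders:

# a sketch of invoking the warnings-extraction script; all values are placeholders
import subprocess

subprocess.run(
    [
        "python", "extract_warnings.py",
        "--workflow_run_id", "12345",
        "--output_dir", "warnings_out",
        "--targets", "DeprecationWarning,UserWarning",
    ],
    check=True,
)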
214
0
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _lowercase : Dict ="\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n" _lowercase : List[Any] ="\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n" _lowercase : Union[str, Any] ="\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. 
`\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] , ) def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase=None , __lowercase=True , __lowercase=False ) -> Any: """simple docstring""" if rouge_types is None: a__ : Optional[int] = ["rouge1", "rouge2", "rougeL", "rougeLsum"] a__ : List[Any] = rouge_scorer.RougeScorer(rouge_types=_a , use_stemmer=_a ) if use_aggregator: a__ : Optional[int] = scoring.BootstrapAggregator() else: a__ : Optional[Any] = [] for ref, pred in zip(_a , _a ): a__ : Dict = scorer.score(_a , _a ) if use_aggregator: aggregator.add_scores(_a ) else: scores.append(_a ) if use_aggregator: a__ : str = aggregator.aggregate() else: a__ : Dict = {} for key in scores[0]: a__ : List[str] = [score[key] for score in scores] return result
370
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including ``num`` (sieve of Eratosthenes)."""
    if num <= 0:
        raise ValueError(f"{num}: Invalid input, please enter a positive integer.")

    sieve = [True] * (num + 1)
    prime: list[int] = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Mark multiples of start as composite
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Every unmarked number above sqrt(num) is prime
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
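Two quick checks of prime_sieve above:

# sanity checks for prime_sieve
assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]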
266
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : List[Any] = KandinskyImgaImgPipeline _UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"] _UpperCamelCase : List[Any] = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", ] _UpperCamelCase : Dict = [ "generator", "height", "width", "strength", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] _UpperCamelCase : Union[str, Any] = False @property def __A ( self ): return 32 @property def __A ( self ): return 32 @property def __A ( self ): return self.time_input_dim @property def __A ( self ): return self.time_input_dim * 4 @property def __A ( self ): return 100 @property def __A ( self ): _lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : List[Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) _lowerCAmelCase : int = MultilingualCLIP(a__ ) _lowerCAmelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : str = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } _lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ ) return model @property def __A ( self ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self ): torch.manual_seed(0 ) _lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self ): _lowerCAmelCase : Union[str, Any] = 
self.dummy_text_encoder _lowerCAmelCase : List[Any] = self.dummy_tokenizer _lowerCAmelCase : int = self.dummy_unet _lowerCAmelCase : Dict = self.dummy_movq _lowerCAmelCase : Tuple = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } _lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ ) _lowerCAmelCase : List[Any] = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __A ( self , a__ , a__=0 ): _lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ ) _lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ ) # create init_image _lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ ) _lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) ) if str(a__ ).startswith("""mps""" ): _lowerCAmelCase : List[Any] = torch.manual_seed(a__ ) else: _lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ ) _lowerCAmelCase : Optional[Any] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def __A ( self ): _lowerCAmelCase : Any = """cpu""" _lowerCAmelCase : int = self.get_dummy_components() _lowerCAmelCase : int = self.pipeline_class(**a__ ) _lowerCAmelCase : Optional[int] = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) ) _lowerCAmelCase : List[Any] = output.images _lowerCAmelCase : Tuple = pipe( **self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0] _lowerCAmelCase : Dict = image[0, -3:, -3:, -1] _lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _lowerCAmelCase : str = np.array( [0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __A ( unittest.TestCase ): def __A ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self ): _lowerCAmelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) _lowerCAmelCase : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) _lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k""" _lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , 
torch_dtype=torch.floataa ) pipe_prior.to(a__ ) _lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) _lowerCAmelCase : Any = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) _lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) _lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior( a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() _lowerCAmelCase : Union[str, Any] = pipeline( a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) _lowerCAmelCase : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(a__ , a__ )
44
import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = (DDPMParallelScheduler,) def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any: '''simple docstring''' lowerCamelCase__: Any ={ "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**UpperCAmelCase_) return config def SCREAMING_SNAKE_CASE_ (self : int) ->Dict: '''simple docstring''' for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]: '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]: '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple: '''simple docstring''' self.check_over_configs(thresholding=UpperCAmelCase_) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , ) def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]: '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : int) ->int: '''simple docstring''' for t in [0, 500, 999]: self.check_over_forward(time_step=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5 def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: int =self.scheduler_classes[0] lowerCamelCase__: Tuple =self.get_scheduler_config() lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: Optional[int] =self.dummy_model() lowerCamelCase__: int =self.dummy_sample_deter lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1 lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1 lowerCamelCase__: Optional[Any] =samplea.shape[0] lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0) lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_) lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , 
timesteps.flatten(0 , 1) , samples.flatten(0 , 1)) lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 1153.1833) < 1E-2 assert abs(result_mean.item() - 0.5005) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =self.dummy_model() lowerCamelCase__: List[Any] =self.dummy_sample_deter lowerCamelCase__: int =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. predict previous mean of sample x_t-1 lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: Any =pred_prev_sample lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 258.9606) < 1E-2 assert abs(result_mean.item() - 0.3372) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : int) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction") lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: str =len(UpperCAmelCase_) lowerCamelCase__: str =self.dummy_model() lowerCamelCase__: str =self.dummy_sample_deter lowerCamelCase__: Dict =torch.manual_seed(0) for t in reversed(range(UpperCAmelCase_)): # 1. predict noise residual lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_) # 2. 
predict previous mean of sample x_t-1 lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample lowerCamelCase__: List[str] =pred_prev_sample lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_)) lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_)) assert abs(result_sum.item() - 202.0296) < 1E-2 assert abs(result_mean.item() - 0.2631) < 1E-3 def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: str =self.scheduler_classes[0] lowerCamelCase__: Union[str, Any] =self.get_scheduler_config() lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=UpperCAmelCase_) lowerCamelCase__: Union[str, Any] =scheduler.timesteps for i, timestep in enumerate(UpperCAmelCase_): if i == len(UpperCAmelCase_) - 1: lowerCamelCase__: Dict =-1 else: lowerCamelCase__: Union[str, Any] =timesteps[i + 1] lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_) lowerCamelCase__: str =prev_t.item() self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: List[Any] =self.get_scheduler_config() lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0] with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."): scheduler.set_timesteps(timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =self.scheduler_classes[0] lowerCamelCase__: Any =self.get_scheduler_config() lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0] lowerCamelCase__: int =len(UpperCAmelCase_) with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_) def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any: '''simple docstring''' lowerCamelCase__: Tuple =self.scheduler_classes[0] lowerCamelCase__: Optional[Any] =self.get_scheduler_config() lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_) lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps] with self.assertRaises( UpperCAmelCase_ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ): scheduler.set_timesteps(timesteps=UpperCAmelCase_)
10
0
def circle_sort(collection: list) -> list:
    """Sort a list in place using the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped

        left = low
        right = high
        # Compare and swap elements from both ends, moving inward
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1

        # Odd-length subarray: compare the middle element with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
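A quick check of circle_sort above:

# sanity checks for circle_sort
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []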
366
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
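A sketch of consuming the helpers above; the "q"/"a" response keys follow the zenquotes.io API and are an assumption here:

# iterate over the returned quote dicts; "q" is the quote, "a" the author (assumed keys)
quotes = random_quotes()
for quote in quotes:
    print(f"{quote['q']} - {quote['a']}")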
4
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
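A small sketch showing the attribute_map aliases of the config above in action:

# instantiating the config with smaller dimensions; the aliased reads go through attribute_map
config = XGLMConfig(d_model=128, num_layers=2, attention_heads=4, ffn_dim=256)
print(config.hidden_size)        # 128, resolved to d_model via attribute_map
print(config.num_hidden_layers)  # 2, resolved to num_layers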
169
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
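The lazy-init pattern above (and in the EfficientNet sample further down) defers heavy imports until an attribute is first accessed. A stripped-down sketch of the idea, not the transformers implementation:

# a minimal sketch of lazy attribute loading, assuming nothing beyond the stdlib
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # import the defining submodule only now, on first access
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

# usage sketch: lazy = LazyModule("mypkg", {"configuration_glpn": ["GLPNConfig"]})
# accessing lazy.GLPNConfig would import mypkg.configuration_glpn on demand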
47
0
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ ( A__ ): def A ( self : Dict ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , 'embed_dim' ) ) self.parent.assertTrue(hasattr(_a , 'num_heads' ) ) class A__ : def __init__( self : Union[str, Any] , _a : Tuple , _a : int=13 , _a : Any=64 , _a : Optional[Any]=3 , _a : Union[str, Any]=[16, 48, 96] , _a : Optional[Any]=[1, 3, 6] , _a : int=[1, 2, 10] , _a : List[str]=[7, 3, 3] , _a : Any=[4, 2, 2] , _a : Any=[2, 1, 1] , _a : Optional[Any]=[2, 2, 2] , _a : str=[False, False, True] , _a : str=[0.0, 0.0, 0.0] , _a : Union[str, Any]=0.02 , _a : int=1e-12 , _a : Optional[Any]=True , _a : Optional[int]=True , _a : List[Any]=2 , ) -> int: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =patch_sizes _SCREAMING_SNAKE_CASE =patch_stride _SCREAMING_SNAKE_CASE =patch_padding _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =num_labels _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =embed_dim _SCREAMING_SNAKE_CASE =num_heads _SCREAMING_SNAKE_CASE =stride_kv _SCREAMING_SNAKE_CASE =depth _SCREAMING_SNAKE_CASE =cls_token _SCREAMING_SNAKE_CASE =attention_drop_rate _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =layer_norm_eps def A ( self : List[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE =None if self.use_labels: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels ) _SCREAMING_SNAKE_CASE =self.get_config() return config, pixel_values, labels def A ( self : Dict ) -> Optional[Any]: '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def A ( self : Tuple , _a : List[Any] , _a : Tuple , _a : Any ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =CvtModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a ) _SCREAMING_SNAKE_CASE =(self.image_size, self.image_size) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =image_size[0], image_size[1] for i in range(len(self.depth ) ): _SCREAMING_SNAKE_CASE =floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) _SCREAMING_SNAKE_CASE =floor(((width + 2 * self.patch_padding[i] 
- self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def A ( self : str , _a : List[str] , _a : Union[str, Any] , _a : List[Any] ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =CvtForImageClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : List[str] ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs _SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): A__ = (CvtModel, CvtForImageClassification) if is_torch_available() else () A__ = ( {'feature-extraction': CvtModel, 'image-classification': CvtForImageClassification} if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False A__ = False def A ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =CvtModelTester(self ) _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def A ( self : Any ) -> Any: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A ( self : Any ) -> Optional[Any]: '''simple docstring''' return @unittest.skip(reason='Cvt does not output attentions' ) def A ( self : List[str] ) -> Optional[int]: '''simple docstring''' pass @unittest.skip(reason='Cvt does not use inputs_embeds' ) def A ( self : str ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='Cvt does not support input and output embeddings' ) def A ( self : Tuple ) -> Any: '''simple docstring''' pass def A ( self : str ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE =model_class(_a ) _SCREAMING_SNAKE_CASE =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE =[*signature.parameters.keys()] _SCREAMING_SNAKE_CASE =['pixel_values'] self.assertListEqual(arg_names[:1] , _a ) def A ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def A ( self : Union[str, Any] ) -> Dict: '''simple docstring''' def check_hidden_states_output(_a : Optional[int] , _a : Optional[Any] , _a : str ): _SCREAMING_SNAKE_CASE =model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) ) _SCREAMING_SNAKE_CASE =outputs.hidden_states _SCREAMING_SNAKE_CASE =len(self.model_tester.depth ) self.assertEqual(len(_a ) , _a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ 
self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE =True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE =True check_hidden_states_output(_a , _a , _a ) def A ( self : Tuple ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A ( self : str ) -> List[Any]: '''simple docstring''' pass @slow def A ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE =CvtModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def _lowerCAmelCase ( ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def A ( self : Tuple ) -> List[str]: '''simple docstring''' return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def A ( self : List[str] ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a ) _SCREAMING_SNAKE_CASE =self.default_image_processor _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**_a ) # verify the logits _SCREAMING_SNAKE_CASE =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor([0.92_85, 0.90_15, -0.31_50] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
358
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
114
0
from __future__ import annotations


def p_series(nth_term: int, power: int) -> list[str]:
    """Return the first ``nth_term`` terms of the P-series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
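A worked example for p_series above:

# the first term is rendered as "1", later terms as "1 / (n^p)"
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]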
89
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __lowerCAmelCase = None __lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>''' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __lowerCAmelCase = [ np.dtype('''|b1'''), np.dtype('''|u1'''), np.dtype('''<u2'''), np.dtype('''>u2'''), np.dtype('''<i2'''), np.dtype('''>i2'''), np.dtype('''<u4'''), np.dtype('''>u4'''), np.dtype('''<i4'''), np.dtype('''>i4'''), np.dtype('''<f4'''), np.dtype('''>f4'''), np.dtype('''<f8'''), np.dtype('''>f8'''), ] @dataclass class __magic_name__ : lowerCAmelCase : bool = True lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "PIL.Image.Image" lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase ) def __call__( self : Union[str, Any] ): return self.pa_type def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Optional[Any] = np.array(_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return {"path": value, "bytes": None} elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return {"path": None, "bytes": value} elif isinstance(_UpperCAmelCase ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_UpperCAmelCase ) elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ): if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support decoding images, please install \'Pillow\'.' 
) if token_per_repo_id is None: _a : Dict = {} _a , _a : str = value['path'], value['bytes'] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(_UpperCAmelCase ): _a : Any = PIL.Image.open(_UpperCAmelCase ) else: _a : List[Any] = path.split('::' )[-1] try: _a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id'] _a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase ) except ValueError: _a : int = None with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f: _a : Tuple = BytesIO(f.read() ) _a : Union[str, Any] = PIL.Image.open(bytes_ ) else: _a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def __lowercase ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value('binary' ), "path": Value('string' ), } ) def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): _a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() ) _a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: _a : Union[str, Any] = storage.field('bytes' ) else: _a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: _a : Union[str, Any] = storage.field('path' ) else: _a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _a : List[str] = pa.array( [encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,) _a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Optional[Any] = pa.StructArray.from_arrays( [bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase ,self.pa_type ) def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(_UpperCAmelCase : Tuple ): with xopen(_UpperCAmelCase ,'rb' ) as f: _a : int = f.read() return bytes_ _a : Any = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) _a : Optional[Any] = pa.array( [os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,) _a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase ,self.pa_type ) def __lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes: _a : Optional[int] = BytesIO() if image.format in list_image_compression_formats(): _a : Optional[Any] = image.format else: _a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' image.save(lowerCAmelCase_ , format=lowerCAmelCase_ ) return buffer.getvalue() def __lowerCamelCase ( lowerCAmelCase_ ) -> dict: if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __lowerCamelCase ( lowerCAmelCase_ ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) _a : List[Any] = array.dtype _a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER _a : Union[str, Any] = dtype.kind _a : Union[str, Any] = dtype.itemsize _a : List[Any] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _a : Optional[int] = np.dtype('|u1' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _a : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ ) _a : List[Any] = np.dtype(lowerCAmelCase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) _a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) ) return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) if objs: _a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCAmelCase_ , np.ndarray ): _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] elif isinstance(lowerCAmelCase_ , PIL.Image.Image ): _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] else: return objs else: return objs
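A hedged usage sketch of this Image feature through the public datasets API; the file path is a placeholder:

# a minimal sketch, assuming `datasets` and Pillow are installed; the path is a placeholder
from datasets import Dataset, Image

ds = Dataset.from_dict({"image": ["path/to/example.png"]}).cast_column("image", Image())
pil_image = ds[0]["image"]  # decode_example returns a PIL.Image.Image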
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP HEAD request that emulates the server being down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
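Outside of a test harness, the round trip these tests exercise looks roughly like the sketch below; the repo name and token are placeholders, and write access to the Hugging Face Hub is assumed.

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-test-image-processor", use_auth_token="hf_xxx")  # creates <user>/my-test-image-processor
reloaded = ViTImageProcessor.from_pretrained("<user>/my-test-image-processor")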
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE: the concrete model name was anonymized in this sample; `ImageProcessor` is a stand-in.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
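A short sketch of how a processor with this interface is typically called; `ViTImageProcessor` stands in for the anonymized class above, and the input array is synthetic:

import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(do_resize=True, size={"height": 224, "width": 224})
batch = processor(images=np.zeros((256, 256, 3), dtype=np.uint8), return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)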
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(
                f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}."
            )

        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
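The trickiest part of `rename_state_dict` is the index arithmetic in the two regex branches. A standalone illustration on made-up keys (these key strings are illustrative, not taken from a real CLAP checkpoint):

import re

SEQUENTIAL = r".*sequential.(\d+).*"
PROJECTION = r".*_projection.(\d+).*"

for key in ["text_model.sequential.3.weight", "text_projection.0.bias"]:
    if re.match(SEQUENTIAL, key):
        i = int(re.match(SEQUENTIAL, key).group(1))
        key = key.replace(f"sequential.{i}.", f"layers.{i // 3}.linear.")  # groups of 3 map to one linear layer
    elif re.match(PROJECTION, key):
        i = int(re.match(PROJECTION, key).group(1))
        key = key.replace(f"_projection.{i}.", f"_projection.linear{1 if i == 0 else 2}.")
    print(key)
# -> text_model.layers.1.linear.weight
# -> text_projection.linear1.bias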
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
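A sketch of how these callbacks are meant to be wired into a trainer, assuming the function names restored above (the output directory, metric, and model are placeholders):

import pytorch_lightning as pl

trainer = pl.Trainer(
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback("output_dir", metric="rouge2"),
        get_early_stopping_callback("rouge2", patience=3),
    ],
)
# trainer.fit(model)  # `model` would be the seq2seq LightningModule being trained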
import random


class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        # Encrypt each character with its own random key: c = (ord(ch) + k) * k
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key

    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        # Invert c = (p + k) * k  =>  p = (c - k**2) / k
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)


if __name__ == "__main__":
    c, k = Onepad().encrypt("Hello")
    print(c, k)
    print(Onepad().decrypt(c, k))
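Decryption works because each character p is encrypted as c = (p + k) * k = p*k + k**2, so (c - k**2) / k recovers p exactly for any key k. A quick round-trip check:

cipher, key = Onepad().encrypt("round trip")
assert Onepad().decrypt(cipher, key) == "round trip"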
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}


logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
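A direct call equivalent to running the script from the command line; all paths here are hypothetical placeholders pointing at a downloaded TF XLNet checkpoint:

convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="./xlnet_cased/xlnet_model.ckpt",
    bert_config_file="./xlnet_cased/xlnet_config.json",
    pytorch_dump_folder_path="./xlnet-pytorch",
    finetuning_task="sts-b",
)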