code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowerCamelCase__ : int = datasets.utils.logging.get_logger(__name__) lowerCamelCase__ : str = ['names', 'prefix'] lowerCamelCase__ : List[Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowerCamelCase__ : Optional[int] = ['encoding_errors', 'on_bad_lines'] lowerCamelCase__ : str = ['date_format'] @dataclass class lowerCamelCase_ ( datasets.BuilderConfig ): '''simple docstring''' lowercase_ = "," lowercase_ = None lowercase_ = "infer" lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = False lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = True lowercase_ = False lowercase_ = True lowercase_ = None lowercase_ = "." 
lowercase_ = None lowercase_ = '"' lowercase_ = 0 lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = None lowercase_ = True lowercase_ = True lowercase_ = 0 lowercase_ = True lowercase_ = False lowercase_ = None lowercase_ = 10_000 lowercase_ = None lowercase_ = "strict" lowercase_ = "error" lowercase_ = None def lowerCAmelCase_ ( self : Any ): if self.delimiter is not None: SCREAMING_SNAKE_CASE_ = self.delimiter if self.column_names is not None: SCREAMING_SNAKE_CASE_ = self.column_names @property def lowerCAmelCase_ ( self : Dict ): SCREAMING_SNAKE_CASE_ = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + 
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , _lowerCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCamelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' lowercase_ = CsvConfig def lowerCAmelCase_ ( self : Tuple ): return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Optional[int] ): if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) SCREAMING_SNAKE_CASE_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_lowerCAmelCase , (str, list, tuple) ): SCREAMING_SNAKE_CASE_ = data_files if isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] SCREAMING_SNAKE_CASE_ = [] for split_name, files in data_files.items(): if isinstance(_lowerCAmelCase , _lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = [files] SCREAMING_SNAKE_CASE_ = [dl_manager.iter_files(_lowerCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=_lowerCAmelCase , gen_kwargs={'files': files} ) ) return splits def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : pa.Table ): if self.config.features is not None: SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema if all(not 
require_storage_cast(_lowerCAmelCase ) for feature in self.config.features.values() ): # cheaper cast SCREAMING_SNAKE_CASE_ = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=_lowerCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example SCREAMING_SNAKE_CASE_ = table_cast(_lowerCAmelCase , _lowerCAmelCase ) return pa_table def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : Optional[Any] ): SCREAMING_SNAKE_CASE_ = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str SCREAMING_SNAKE_CASE_ = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(_lowerCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCAmelCase ) ): SCREAMING_SNAKE_CASE_ = pd.read_csv(_lowerCAmelCase , iterator=_lowerCAmelCase , dtype=_lowerCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(_lowerCAmelCase ): SCREAMING_SNAKE_CASE_ = pa.Table.from_pandas(_lowerCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(_lowerCAmelCase ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(_lowerCAmelCase )}: {e}" ) raise
31
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class UpperCamelCase_ ( __a ): '''simple docstring''' def __init__( self :str , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :UNetaDModel , lowerCAmelCase__ :DDPMScheduler , lowerCAmelCase__ :Optional[Any] , ) ->Optional[int]: super().__init__() lowercase = value_function lowercase = unet lowercase = scheduler lowercase = env lowercase = env.get_dataset() lowercase = {} for key in self.data.keys(): try: lowercase = self.data[key].mean() except: # noqa: E722 pass lowercase = {} for key in self.data.keys(): try: lowercase = self.data[key].std() except: # noqa: E722 pass lowercase = env.observation_space.shape[0] lowercase = env.action_space.shape[0] def SCREAMING_SNAKE_CASE( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) ->Any: return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE( self :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] ) ->Optional[Any]: return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :Optional[Any] ) ->Tuple: if type(lowerCAmelCase__ ) is dict: return {k: self.to_torch(lowerCAmelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCAmelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCAmelCase__ , device=self.unet.device ) def SCREAMING_SNAKE_CASE( self :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] ) ->Any: for key, val in cond.items(): lowercase = val.clone() return x_in def SCREAMING_SNAKE_CASE( self :str , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) ->Tuple: lowercase = x.shape[0] lowercase = None for i in 
tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowercase = torch.full((batch_size,) , lowerCAmelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCAmelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowercase = self.value_function(x.permute(0 , 2 , 1 ) , lowerCAmelCase__ ).sample lowercase = torch.autograd.grad([y.sum()] , [x] )[0] lowercase = self.scheduler._get_variance(lowerCAmelCase__ ) lowercase = torch.exp(0.5 * posterior_variance ) lowercase = model_std * grad lowercase = 0 lowercase = x.detach() lowercase = x + scale * grad lowercase = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim ) lowercase = self.unet(x.permute(0 , 2 , 1 ) , lowerCAmelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowercase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , predict_epsilon=lowerCAmelCase__ )["prev_sample"] # apply conditions to the trajectory (set the initial state) lowercase = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim ) lowercase = self.to_torch(lowerCAmelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :int=2 , lowerCAmelCase__ :List[str]=0.1 ) ->Tuple: # normalize the observations and create batch dimension lowercase = self.normalize(lowerCAmelCase__ , "observations" ) lowercase = obs[None].repeat(lowerCAmelCase__ , axis=0 ) lowercase = {0: self.to_torch(lowerCAmelCase__ )} lowercase = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowercase = randn_tensor(lowerCAmelCase__ , device=self.unet.device ) lowercase = self.reset_xa(lowerCAmelCase__ , lowerCAmelCase__ , self.action_dim ) lowercase = 
self.to_torch(lowerCAmelCase__ ) # run the diffusion process lowercase , lowercase = self.run_diffusion(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # sort output trajectories by value lowercase = y.argsort(0 , descending=lowerCAmelCase__ ).squeeze() lowercase = x[sorted_idx] lowercase = sorted_values[:, :, : self.action_dim] lowercase = actions.detach().cpu().numpy() lowercase = self.de_normalize(lowerCAmelCase__ , key="actions" ) # select the action with the highest value if y is not None: lowercase = 0 else: # if we didn't run value guiding, select a random action lowercase = np.random.randint(0 , lowerCAmelCase__ ) lowercase = denorm_actions[selected_index, 0] return denorm_actions
441
0
'''simple docstring''' import json import sys def _UpperCamelCase ( _a : Dict , _a : List[Any] ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as f: __UpperCamelCase : Dict = json.load(_SCREAMING_SNAKE_CASE ) __UpperCamelCase : Any = ['<details>', '<summary>Show updated benchmarks!</summary>', ' '] for benchmark_name in sorted(_SCREAMING_SNAKE_CASE ): __UpperCamelCase : int = results[benchmark_name] __UpperCamelCase : List[str] = benchmark_name.split('/' )[-1] output_md.append(f"""### Benchmark: {benchmark_file_name}""" ) __UpperCamelCase : List[str] = '| metric |' __UpperCamelCase : Dict = '|--------|' __UpperCamelCase : Tuple = '| new / old (diff) |' for metric_name in sorted(_SCREAMING_SNAKE_CASE ): __UpperCamelCase : Any = benchmark_res[metric_name] __UpperCamelCase : Dict = metric_vals['new'] __UpperCamelCase : int = metric_vals.get('old' , _SCREAMING_SNAKE_CASE ) __UpperCamelCase : Dict = metric_vals.get('diff' , _SCREAMING_SNAKE_CASE ) __UpperCamelCase : int = f""" {new_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else 'None' if old_val is not None: val_str += f""" / {old_val:f}""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" if dif_val is not None: val_str += f""" ({dif_val:f})""" if isinstance(_SCREAMING_SNAKE_CASE , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('</details>' ) with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f: f.writelines('\n'.join(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": a= sys.argv[1] a= sys.argv[2] format_json_to_md(input_json_file, output_md_file)
701
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a= logging.get_logger(__name__) a= '''▁''' a= { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } a= { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } a= { '''facebook/s2t-small-librispeech-asr''': 1_0_2_4, } a= ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] a= {'''mustc''': MUSTC_LANGS} class __lowercase ( _lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = MAX_MODEL_INPUT_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<unk>" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = None , **_lowerCamelCase , ): __UpperCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , do_upper_case=_lowerCamelCase , do_lower_case=_lowerCamelCase , tgt_lang=_lowerCamelCase , lang_codes=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) __UpperCamelCase : Union[str, Any] = do_upper_case __UpperCamelCase : Dict = do_lower_case __UpperCamelCase : 
List[str] = load_json(_lowerCamelCase ) __UpperCamelCase : List[Any] = {v: k for k, v in self.encoder.items()} __UpperCamelCase : int = spm_file __UpperCamelCase : List[Any] = load_spm(_lowerCamelCase , self.sp_model_kwargs ) if lang_codes is not None: __UpperCamelCase : Any = lang_codes __UpperCamelCase : Any = LANGUAGES[lang_codes] __UpperCamelCase : str = [f"""<lang:{lang}>""" for lang in self.langs] __UpperCamelCase : List[str] = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} __UpperCamelCase : str = self.lang_tokens __UpperCamelCase : str = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __UpperCamelCase : Dict = {} @property def lowerCAmelCase ( self ): return len(self.encoder ) @property def lowerCAmelCase ( self ): return self._tgt_lang @tgt_lang.setter def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : Optional[int] = new_tgt_lang self.set_tgt_lang_special_tokens(_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : int = self.lang_code_to_id[tgt_lang] __UpperCamelCase : List[str] = [lang_code_id] def lowerCAmelCase ( self , _lowerCamelCase ): return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase ): return self.encoder.get(_lowerCamelCase , self.encoder[self.unk_token] ) def lowerCAmelCase ( self , _lowerCamelCase ): return self.decoder.get(_lowerCamelCase , self.unk_token ) def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : Optional[Any] = [] __UpperCamelCase : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __UpperCamelCase : int = self.sp_model.decode(_lowerCamelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " __UpperCamelCase : int = [] else: current_sub_tokens.append(_lowerCamelCase ) __UpperCamelCase : 
str = self.sp_model.decode(_lowerCamelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase ) __UpperCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens ) __UpperCamelCase : Any = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones def lowerCAmelCase ( self ): __UpperCamelCase : Union[str, Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __UpperCamelCase : int = self.__dict__.copy() __UpperCamelCase : Dict = None return state def __setstate__( self , _lowerCamelCase ): __UpperCamelCase : List[str] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase : Optional[int] = {} __UpperCamelCase : Union[str, Any] = load_spm(self.spm_file , self.sp_model_kwargs ) def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): __UpperCamelCase : List[str] = Path(_lowerCamelCase ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" __UpperCamelCase : Optional[int] = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) __UpperCamelCase : Union[str, Any] = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + 
self.vocab_files_names['spm_file'] ) save_json(self.encoder , _lowerCamelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _lowerCamelCase ) elif not os.path.isfile(self.spm_file ): with open(_lowerCamelCase , 'wb' ) as fi: __UpperCamelCase : Dict = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase ) return (str(_lowerCamelCase ), str(_lowerCamelCase )) def _UpperCamelCase ( _a : str , _a : Dict[str, Any] ): """simple docstring""" __UpperCamelCase : List[Any] = sentencepiece.SentencePieceProcessor(**_a ) spm.Load(str(_a ) ) return spm def _UpperCamelCase ( _a : str ): """simple docstring""" with open(_a , 'r' ) as f: return json.load(_a ) def _UpperCamelCase ( _a : Any , _a : str ): """simple docstring""" with open(_a , 'w' ) as f: json.dump(_a , _a , indent=2 )
287
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] = None , ): """simple docstring""" __magic_name__ : Dict = {} if train_file is not None: __magic_name__ : Optional[Any] = [train_file] if eval_file is not None: __magic_name__ : Optional[Any] = [eval_file] if test_file is not None: __magic_name__ : Dict = [test_file] __magic_name__ : Dict = datasets.load_dataset('csv' , data_files=lowerCAmelCase ) __magic_name__ : Union[str, Any] = list(ds[list(files.keys() )[0]].features.keys() ) __magic_name__ : Tuple = features_name.pop(lowerCAmelCase ) __magic_name__ : Dict = list(set(ds[list(files.keys() )[0]][label_name] ) ) __magic_name__ : int = {label: i for i, label in enumerate(lowerCAmelCase )} __magic_name__ : Optional[Any] = tokenizer.model_input_names __magic_name__ : int = {} if len(lowerCAmelCase ) == 1: for k in files.keys(): __magic_name__ : Any = ds[k].map( lambda lowerCAmelCase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' ) , batched=lowerCAmelCase , ) elif len(lowerCAmelCase ) == 2: for k in files.keys(): __magic_name__ : Any = ds[k].map( lambda lowerCAmelCase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding='max_length' , ) , 
batched=lowerCAmelCase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __magic_name__ : int = {k: v for k, v in ex.items() if k in input_names} __magic_name__ : Tuple = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __magic_name__ : str = {k: v for k, v in ex.items() if k in input_names} __magic_name__ : Dict = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __magic_name__ : int = {k: v for k, v in ex.items() if k in input_names} __magic_name__ : Optional[int] = labelaid[ex[label_name]] yield (d, label) __magic_name__ : Tuple = ( tf.data.Dataset.from_generator( lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __magic_name__ : Optional[int] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __magic_name__ : Tuple = ( tf.data.Dataset.from_generator( lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __magic_name__ : Dict = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __magic_name__ : Union[str, Any] = ( tf.data.Dataset.from_generator( lowerCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __magic_name__ : str = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase :Optional[int] = logging.getLogger(__name__) @dataclass class _lowerCamelCase : 
'''simple docstring''' A_ : int = field(metadata={"""help""": """Which column contains the label"""} ) A_ : str = field(default=__a , metadata={"""help""": """The path of the training file"""} ) A_ : Optional[str] = field(default=__a , metadata={"""help""": """The path of the development file"""} ) A_ : Optional[str] = field(default=__a , metadata={"""help""": """The path of the test file"""} ) A_ : int = field( default=1_28 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) A_ : bool = field( default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) @dataclass class _lowerCamelCase : '''simple docstring''' A_ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) A_ : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) A_ : Optional[str] = field( default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) A_ : bool = field(default=__a , metadata={"""help""": """Set this flag to use fast tokenization."""} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. 
A_ : Optional[str] = field( default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) def lowerCamelCase ( ): """simple docstring""" __magic_name__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' f'16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __magic_name__ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCAmelCase ) , labelaid=lowerCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __magic_name__ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , ) def compute_metrics(lowerCAmelCase : List[str] ) -> Dict: __magic_name__ : int = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __magic_name__ : Tuple = TFTrainer( model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , compute_metrics=lowerCAmelCase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __magic_name__ : Optional[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __magic_name__ : Any = trainer.evaluate() __magic_name__ : Tuple = os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(lowerCAmelCase , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(f' {key} = {value}' ) 
writer.write(f'{key} = {value}\n' ) results.update(lowerCAmelCase ) return results if __name__ == "__main__": main()
561
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def UpperCamelCase ( a , a , a ) -> Union[str, Any]: '''simple docstring''' # Initialise PyTorch model __magic_name__ = BertConfig.from_json_file(a ) print(F'''Building PyTorch model from configuration: {config}''' ) __magic_name__ = BertForPreTraining(a ) # Load weights from tf checkpoint load_tf_weights_in_bert(a , a , a ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , a ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--bert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _lowerCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
432
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCamelCase :int = { 'configuration_groupvit': [ 'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GroupViTConfig', 'GroupViTOnnxConfig', 'GroupViTTextConfig', 'GroupViTVisionConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :str = [ 'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GroupViTModel', 'GroupViTPreTrainedModel', 'GroupViTTextModel', 'GroupViTVisionModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :List[str] = [ 'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFGroupViTModel', 'TFGroupViTPreTrainedModel', 'TFGroupViTTextModel', 'TFGroupViTVisionModel', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys __lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , 
"""_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : 
Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) 
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
42
1
'''simple docstring''' import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration __lowercase : Optional[Any] = pytest.mark.integration __lowercase : Union[str, Any] = {'''comet'''} __lowercase : int = importlib.util.find_spec('''fairseq''') is not None __lowercase : int = {'''code_eval'''} __lowercase : List[str] = os.name == '''nt''' __lowercase : Tuple = {'''bertscore''', '''frugalscore''', '''perplexity'''} __lowercase : Optional[Any] = importlib.util.find_spec('''transformers''') is not None def lowercase_ ( _lowercase ) -> Tuple: '''simple docstring''' @wraps(_lowercase ) def wrapper(self , _lowercase ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('''"test requires Fairseq"''' ) else: test_case(self , _lowercase ) return wrapper def lowercase_ ( _lowercase ) -> int: '''simple docstring''' @wraps(_lowercase ) def wrapper(self , _lowercase ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('''"test requires transformers"''' ) else: test_case(self , _lowercase ) return wrapper def lowercase_ ( _lowercase ) -> str: '''simple docstring''' @wraps(_lowercase ) def wrapper(self , _lowercase ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('''"test not supported on Windows"''' ) else: test_case(self , _lowercase ) return wrapper def lowercase_ ( ) -> List[Any]: '''simple docstring''' lowerCamelCase_ : Optional[int] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) 
@for_all_test_methods( _lowercase , _lowercase , _lowercase ) @local class __lowercase ( parameterized.TestCase ): lowerCamelCase : Union[str, Any] = {} lowerCamelCase : str = None @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' ) def UpperCAmelCase__ (self , A ): lowerCamelCase_ : Optional[int] = '''[...]''' lowerCamelCase_ : Optional[int] = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , A ) ).module_path ) lowerCamelCase_ : int = datasets.load.import_main_class(metric_module.__name__ , dataset=A ) # check parameters lowerCamelCase_ : Optional[int] = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(A , metric_module.__name__ ): with self.use_local_metrics(): try: lowerCamelCase_ : List[Any] = doctest.testmod(A , verbose=A , raise_on_error=A ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def UpperCAmelCase__ (self , A ): lowerCamelCase_ : Dict = '''[...]''' lowerCamelCase_ : int = importlib.import_module( datasets.load.metric_module_factory(os.path.join('''metrics''' , A ) ).module_path ) # run doctest with self.use_local_metrics(): lowerCamelCase_ : Any = doctest.testmod(A , verbose=A , raise_on_error=A ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def UpperCAmelCase__ (self , A , A ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](A ): yield else: yield @contextmanager def UpperCAmelCase__ (self ): def load_local_metric(A , *A , **A ): return load_metric(os.path.join('''metrics''' , A ) , *A , **A ) with 
patch('''datasets.load_metric''' ) as mock_load_metric: lowerCamelCase_ : Optional[int] = load_local_metric yield @classmethod def UpperCAmelCase__ (cls , A ): def wrapper(A ): lowerCamelCase_ : int = contextmanager(A ) lowerCamelCase_ : Dict = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher('''bleurt''' ) def lowercase_ ( _lowercase ) -> Optional[int]: '''simple docstring''' import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags class __lowercase ( _lowercase ): def UpperCAmelCase__ (self , A ): assert len(input_dict['''input_ids'''] ) == 2 return np.array([1.03, 1.04] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor: lowerCamelCase_ : Tuple = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher('''bertscore''' ) def lowercase_ ( _lowercase ) -> Dict: '''simple docstring''' import torch def bert_cos_score_idf(_lowercase , _lowercase , *_lowercase , **_lowercase ): return torch.tensor([[1.0, 1.0, 1.0]] * len(_lowercase ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch('''bert_score.scorer.get_model''' ), patch( '''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf: lowerCamelCase_ : Tuple = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher('''comet''' ) def lowercase_ ( _lowercase ) -> Any: '''simple docstring''' def load_from_checkpoint(_lowercase ): class __lowercase : def UpperCAmelCase__ (self , A , *A , **A ): assert len(A ) == 2 lowerCamelCase_ : Dict = [0.19, 0.92] return scores, sum(A ) / len(A ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert 
model with patch('''comet.download_model''' ) as mock_download_model: lowerCamelCase_ : Any = None with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint: lowerCamelCase_ : str = load_from_checkpoint yield def lowercase_ ( ) -> Tuple: '''simple docstring''' lowerCamelCase_ : Dict = load_metric(os.path.join('''metrics''' , '''seqeval''' ) ) lowerCamelCase_ : Union[str, Any] = '''ERROR''' lowerCamelCase_ : int = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(_lowercase , match=re.escape(_lowercase ) ): metric.compute(predictions=[] , references=[] , scheme=_lowercase )
422
'''simple docstring''' def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> bool: '''simple docstring''' if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> bool: '''simple docstring''' if curr_ind == len(_lowercase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(_lowercase ) ): if valid_connection(_lowercase , _lowercase , _lowercase , _lowercase ): # Insert current vertex into path as next transition lowerCamelCase_ : Any = next_ver # Validate created path if util_hamilton_cycle(_lowercase , _lowercase , curr_ind + 1 ): return True # Backtrack lowerCamelCase_ : Union[str, Any] = -1 return False def lowercase_ ( _lowercase , _lowercase = 0 ) -> list[int]: '''simple docstring''' lowerCamelCase_ : int = [-1] * (len(_lowercase ) + 1) # initialize start and end of path with starting index lowerCamelCase_ : str = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(_lowercase , _lowercase , 1 ) else []
422
1
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class __UpperCamelCase ( lowercase ): def __get__( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str]=None ): '''simple docstring''' if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute" ) UpperCAmelCase_ = '''__cached_''' + self.fget.__name__ UpperCAmelCase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if cached is None: UpperCAmelCase_ = self.fget(UpperCAmelCase__ ) setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return cached def __lowerCAmelCase ( A ): UpperCAmelCase_ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F"invalid truth value {val!r}" ) def __lowerCAmelCase ( A ): if is_torch_fx_proxy(A ): return True if is_torch_available(): import torch if isinstance(A , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(A , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(A , (jnp.ndarray, Tracer) ): return True return isinstance(A , np.ndarray ) def __lowerCAmelCase ( A ): return isinstance(A , np.ndarray ) def __lowerCAmelCase ( A ): return _is_numpy(A ) def __lowerCAmelCase ( A ): import torch return isinstance(A , torch.Tensor ) def __lowerCAmelCase ( A ): return False if not is_torch_available() else _is_torch(A ) def __lowerCAmelCase ( A ): import torch return isinstance(A , torch.device ) def __lowerCAmelCase ( A ): 
return False if not is_torch_available() else _is_torch_device(A ) def __lowerCAmelCase ( A ): import torch if isinstance(A , A ): if hasattr(A , A ): UpperCAmelCase_ = getattr(A , A ) else: return False return isinstance(A , torch.dtype ) def __lowerCAmelCase ( A ): return False if not is_torch_available() else _is_torch_dtype(A ) def __lowerCAmelCase ( A ): import tensorflow as tf return isinstance(A , tf.Tensor ) def __lowerCAmelCase ( A ): return False if not is_tf_available() else _is_tensorflow(A ) def __lowerCAmelCase ( A ): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(A , "is_symbolic_tensor" ): return tf.is_symbolic_tensor(A ) return type(A ) == tf.Tensor def __lowerCAmelCase ( A ): return False if not is_tf_available() else _is_tf_symbolic_tensor(A ) def __lowerCAmelCase ( A ): import jax.numpy as jnp # noqa: F811 return isinstance(A , jnp.ndarray ) def __lowerCAmelCase ( A ): return False if not is_flax_available() else _is_jax(A ) def __lowerCAmelCase ( A ): if isinstance(A , (dict, UserDict) ): return {k: to_py_obj(A ) for k, v in obj.items()} elif isinstance(A , (list, tuple) ): return [to_py_obj(A ) for o in obj] elif is_tf_tensor(A ): return obj.numpy().tolist() elif is_torch_tensor(A ): return obj.detach().cpu().tolist() elif is_jax_tensor(A ): return np.asarray(A ).tolist() elif isinstance(A , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def __lowerCAmelCase ( A ): if isinstance(A , (dict, UserDict) ): return {k: to_numpy(A ) for k, v in obj.items()} elif isinstance(A , (list, tuple) ): return np.array(A ) elif is_tf_tensor(A ): return obj.numpy() elif is_torch_tensor(A ): return obj.detach().cpu().numpy() elif is_jax_tensor(A ): return np.asarray(A ) else: return obj class __UpperCamelCase ( lowercase ): def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = fields(self ) # Safety and consistency checks if not 
len(UpperCAmelCase__ ): raise ValueError(F"{self.__class__.__name__} has no fields." ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(F"{self.__class__.__name__} should not have more than one required field." ) UpperCAmelCase_ = getattr(self , class_fields[0].name ) UpperCAmelCase_ = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(UpperCAmelCase__ ): if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): UpperCAmelCase_ = first_field.items() UpperCAmelCase_ = True else: try: UpperCAmelCase_ = iter(UpperCAmelCase__ ) UpperCAmelCase_ = True except TypeError: UpperCAmelCase_ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(UpperCAmelCase__ ): if ( not isinstance(UpperCAmelCase__ , (list, tuple) ) or not len(UpperCAmelCase__ ) == 2 or not isinstance(element[0] , UpperCAmelCase__ ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute UpperCAmelCase_ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( F"Cannot set key/value for {element}. It needs to be a tuple (key, value)." ) break setattr(self , element[0] , element[1] ) if element[1] is not None: UpperCAmelCase_ = element[1] elif first_field is not None: UpperCAmelCase_ = first_field else: for field in class_fields: UpperCAmelCase_ = getattr(self , field.name ) if v is not None: UpperCAmelCase_ = v def __delitem__( self : Tuple , *lowerCAmelCase : Any , **lowerCAmelCase : Tuple ): '''simple docstring''' raise Exception(F"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance." ) def __A ( self : Optional[Any] , *lowerCAmelCase : Any , **lowerCAmelCase : List[Any] ): '''simple docstring''' raise Exception(F"You cannot use ``setdefault`` on a {self.__class__.__name__} instance." 
) def __A ( self : int , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Tuple ): '''simple docstring''' raise Exception(F"You cannot use ``pop`` on a {self.__class__.__name__} instance." ) def __A ( self : Optional[int] , *lowerCAmelCase : Tuple , **lowerCAmelCase : int ): '''simple docstring''' raise Exception(F"You cannot use ``update`` on a {self.__class__.__name__} instance." ) def __getitem__( self : Optional[Any] , lowerCAmelCase : Tuple ): '''simple docstring''' if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): UpperCAmelCase_ = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str ): '''simple docstring''' if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(UpperCAmelCase__ , UpperCAmelCase__ ) super().__setattr__(UpperCAmelCase__ , UpperCAmelCase__ ) def __setitem__( self : str , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ): '''simple docstring''' super().__setitem__(UpperCAmelCase__ , UpperCAmelCase__ ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(UpperCAmelCase__ , UpperCAmelCase__ ) def __A ( self : List[Any] ): '''simple docstring''' return tuple(self[k] for k in self.keys() ) class __UpperCamelCase ( lowercase , lowercase ): @classmethod def __A ( cls : str , lowerCAmelCase : str ): '''simple docstring''' raise ValueError( F"{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}" ) class __UpperCamelCase ( lowercase ): SCREAMING_SNAKE_CASE__ = 'longest' SCREAMING_SNAKE_CASE__ = 'max_length' SCREAMING_SNAKE_CASE__ = 'do_not_pad' class __UpperCamelCase ( lowercase ): SCREAMING_SNAKE_CASE__ = 'pt' SCREAMING_SNAKE_CASE__ = 'tf' SCREAMING_SNAKE_CASE__ = 'np' SCREAMING_SNAKE_CASE__ = 'jax' class __UpperCamelCase : def __init__( self : List[str] , lowerCAmelCase : List[ContextManager] ): '''simple 
docstring''' UpperCAmelCase_ = context_managers UpperCAmelCase_ = ExitStack() def __enter__( self : Tuple ): '''simple docstring''' for context_manager in self.context_managers: self.stack.enter_context(UpperCAmelCase__ ) def __exit__( self : int , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Union[str, Any] ): '''simple docstring''' self.stack.__exit__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def __lowerCAmelCase ( A ): UpperCAmelCase_ = infer_framework(A ) if framework == "tf": UpperCAmelCase_ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase_ = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase_ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def __lowerCAmelCase ( A ): UpperCAmelCase_ = model_class.__name__ UpperCAmelCase_ = infer_framework(A ) if framework == "tf": UpperCAmelCase_ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": UpperCAmelCase_ = inspect.signature(model_class.forward ) # PyTorch models else: UpperCAmelCase_ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def __lowerCAmelCase ( A , A = "" , A = "." ): def _flatten_dict(A , A="" , A="." 
): for k, v in d.items(): UpperCAmelCase_ = str(A ) + delimiter + str(A ) if parent_key else k if v and isinstance(A , A ): yield from flatten_dict(A , A , delimiter=A ).items() else: yield key, v return dict(_flatten_dict(A , A , A ) ) @contextmanager def __lowerCAmelCase ( A , A = False ): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def __lowerCAmelCase ( A , A=None ): if is_numpy_array(A ): return np.transpose(A , axes=A ) elif is_torch_tensor(A ): return array.T if axes is None else array.permute(*A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.transpose(A , perm=A ) elif is_jax_tensor(A ): return jnp.transpose(A , axes=A ) else: raise ValueError(F"Type not supported for transpose: {type(A )}." ) def __lowerCAmelCase ( A , A ): if is_numpy_array(A ): return np.reshape(A , A ) elif is_torch_tensor(A ): return array.reshape(*A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.reshape(A , A ) elif is_jax_tensor(A ): return jnp.reshape(A , A ) else: raise ValueError(F"Type not supported for reshape: {type(A )}." ) def __lowerCAmelCase ( A , A=None ): if is_numpy_array(A ): return np.squeeze(A , axis=A ) elif is_torch_tensor(A ): return array.squeeze() if axis is None else array.squeeze(dim=A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.squeeze(A , axis=A ) elif is_jax_tensor(A ): return jnp.squeeze(A , axis=A ) else: raise ValueError(F"Type not supported for squeeze: {type(A )}." ) def __lowerCAmelCase ( A , A ): if is_numpy_array(A ): return np.expand_dims(A , A ) elif is_torch_tensor(A ): return array.unsqueeze(dim=A ) elif is_tf_tensor(A ): import tensorflow as tf return tf.expand_dims(A , axis=A ) elif is_jax_tensor(A ): return jnp.expand_dims(A , axis=A ) else: raise ValueError(F"Type not supported for expand_dims: {type(A )}." 
) def __lowerCAmelCase ( A ): if is_numpy_array(A ): return np.size(A ) elif is_torch_tensor(A ): return array.numel() elif is_tf_tensor(A ): import tensorflow as tf return tf.size(A ) elif is_jax_tensor(A ): return array.size else: raise ValueError(F"Type not supported for expand_dims: {type(A )}." ) def __lowerCAmelCase ( A , A ): for key, value in auto_map.items(): if isinstance(A , (tuple, list) ): UpperCAmelCase_ = [F"{repo_id}--{v}" if (v is not None and '''--''' not in v) else v for v in value] elif value is not None and "--" not in value: UpperCAmelCase_ = F"{repo_id}--{value}" return auto_map def __lowerCAmelCase ( A ): for base_class in inspect.getmro(A ): UpperCAmelCase_ = base_class.__module__ UpperCAmelCase_ = base_class.__name__ if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("torch" ) or name == "PreTrainedModel": return "pt" elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F"Could not infer framework from class {model_class}." )
718
from collections.abc import Sequence


def __lowerCAmelCase(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum subarray sum of *arr* using Kadane's algorithm, O(n).

    :param arr: sequence of numbers; an empty sequence yields 0.
    :param allow_empty_subarrays: when True, the empty subarray (sum 0) is a
        legal answer, so an all-negative input returns 0 instead of the
        largest (least negative) single element.
    :return: the maximum attainable contiguous-subarray sum.
    """
    if not arr:
        return 0
    # -inf forces at least one element to be taken unless empty subarrays
    # are allowed, in which case 0 (the empty subarray) is the floor.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart it at `num`
        # (restart at 0, i.e. drop everything, when empty subarrays count).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


# Public alias: the demo below refers to `max_subarray_sum`, and the
# dunder-prefixed name is skipped by `from module import *`.
max_subarray_sum = __lowerCAmelCase

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
268
0
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a stochastic (Karras et al. 2022) scheduler step.

    Field names are grounded in the keyword arguments used to construct this
    class in `step`/`step_correct` below. The base class is inferred from the
    import block — NOTE(review): original base name was corrupted; confirm.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class __snake_case(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (Algorithm 2).

    NOTE(review): the original declared duplicate parameter names (a
    SyntaxError) and assigned instance state to dead locals; names below are
    restored from in-body reads (`self.config.*`, `self.num_inference_steps`,
    `self.timesteps`).
    """

    # NOTE(review): corrupted attribute name kept verbatim; presumably the
    # scheduler `order` — confirm before renaming.
    lowerCamelCase__ = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        # NOTE(review): attribute name inferred from scheduler convention.
        self.init_noise_sigma = sigma_max

        # setable values, populated by set_timesteps()
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity hook kept for interchangeability with other schedulers."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps and the geometric sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like churn: raise sigma to sigma_hat by adding noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """One Euler step from sigma_hat down to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction of the step above."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
38
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_botoa_available():
    import botoa  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role for SageMaker training jobs with a permissive policy.

    NOTE(review): the original three functions shared one corrupted name; this
    name is restored from the call site in the interactive flow below.
    """
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN of an existing IAM role."""
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def UpperCamelCase__() -> "SageMakerConfig":
    """Interactively build a SageMakerConfig for `accelerate config`.

    NOTE(review): the original body assigned every answer to one corrupted
    local and then read the intended names (NameError at runtime); names are
    restored from those reads. The os.environ exports mirror the upstream
    flow — confirm against the project.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f"Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials")
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )

    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=eca_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
38
1
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
# NOTE(review): the module-level names here were corrupted; restored from
# their read sites below (`pytestmark` is the standard pytest hook name).
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    """Skip the wrapped test when the metric needs fairseq and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    """Skip the wrapped test when the metric needs transformers and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    """Skip the wrapped test for metrics unsupported on Windows."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    """Discover local metric directory names under ./metrics."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    # NOTE(review): class name restored from the
    # `LocalMetricTest.register_intensive_calls_patcher` call sites below.
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        """Apply the registered mock patcher for heavyweight metrics, if any."""
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        """Redirect datasets.load_metric to the local ./metrics directory."""

        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        """Decorator registering a generator function as a patcher for *metric_name*."""

        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to
    # download a model checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    # NOTE(review): exception type was corrupted; ValueError assumed — confirm.
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
305
'''simple docstring'''
import operator as op


def SCREAMING_SNAKE_CASE__(post_fix):
    """Evaluate a postfix (RPN) expression, printing an execution table.

    :param post_fix: iterable of string tokens (digits and operators).
    :return: the evaluation result as an int.
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the 2 values popped from stack & push result to stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", SCREAMING_SNAKE_CASE__(Postfix))
305
1
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    IMAGE_PROCESSOR_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    CLIPConfig,
    CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


class AutoImageProcessorTest(unittest.TestCase):
    # NOTE(review): the original class and every method shared corrupted,
    # mutually-shadowing names (never collected by pytest) and read an
    # undefined local; names restored from the assertions and imports.

    def setUp(self):
        # Avoid waiting on the interactive remote-code prompt in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        # NOTE(review): exception types in this file were corrupted;
        # EnvironmentError/ValueError assumed — confirm.
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
266
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """Tests for `EulerDiscreteScheduler` (config sweeps plus full denoising loops)."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a baseline scheduler config, overridable via keyword args."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full sampling loop and pin the summed/mean output magnitudes."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop but with `v_prediction` parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        # init_noise_sigma lives on `torch_device` after set_timesteps; move it back for the CPU multiply.
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
266
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case = logging.get_logger(__name__) snake_case = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ): A__ : Any = '''nat''' A__ : List[str] = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[int] , __lowerCamelCase : str=4 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[int]=6_4 , __lowerCamelCase : str=[3, 4, 6, 5] , __lowerCamelCase : Dict=[2, 4, 8, 1_6] , __lowerCamelCase : Optional[int]=7 , __lowerCamelCase : Union[str, Any]=3.0 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : List[str]=0.0 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[int]=0.0_2 , __lowerCamelCase : List[str]=1E-5 , __lowerCamelCase : Optional[Any]=0.0 , __lowerCamelCase : int=None , __lowerCamelCase : Any=None , **__lowerCamelCase : List[str] , ): """simple docstring""" super().__init__(**__lowerCamelCase ) _snake_case = patch_size _snake_case = num_channels _snake_case = embed_dim _snake_case = depths _snake_case = len(__lowerCamelCase ) _snake_case = num_heads _snake_case = kernel_size _snake_case = mlp_ratio _snake_case = qkv_bias _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = drop_path_rate _snake_case = hidden_act _snake_case = layer_norm_eps _snake_case = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _snake_case = 
int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) ) _snake_case = layer_scale_init_value _snake_case = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(__lowerCamelCase ) + 1 )] _snake_case , _snake_case = get_aligned_output_features_output_indices( out_features=__lowerCamelCase , out_indices=__lowerCamelCase , stage_names=self.stage_names )
404
"""simple docstring""" from collections.abc import Sequence from queue import Queue class UpperCAmelCase : def __init__( self : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Tuple , __lowerCamelCase : Any=None , __lowerCamelCase : List[Any]=None ): """simple docstring""" _snake_case = start _snake_case = end _snake_case = val _snake_case = (start + end) // 2 _snake_case = left _snake_case = right def __repr__( self : List[str] ): """simple docstring""" return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class UpperCAmelCase : def __init__( self : Dict , __lowerCamelCase : Sequence , __lowerCamelCase : Tuple ): """simple docstring""" _snake_case = collection _snake_case = function if self.collection: _snake_case = self._build_tree(0 , len(__lowerCamelCase ) - 1 ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict ): """simple docstring""" self._update_tree(self.root , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] ): """simple docstring""" return self._query_range(self.root , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ): """simple docstring""" if start == end: return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.collection[start] ) _snake_case = (start + end) // 2 _snake_case = self._build_tree(__lowerCamelCase , __lowerCamelCase ) _snake_case = self._build_tree(mid + 1 , __lowerCamelCase ) return SegmentTreeNode(__lowerCamelCase , __lowerCamelCase , self.fn(left.val , right.val ) , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ): """simple docstring""" if node.start == i and node.end == i: _snake_case = val return if i <= 
node.mid: self._update_tree(node.left , __lowerCamelCase , __lowerCamelCase ) else: self._update_tree(node.right , __lowerCamelCase , __lowerCamelCase ) _snake_case = self.fn(node.left.val , node.right.val ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ): """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , __lowerCamelCase , __lowerCamelCase ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , __lowerCamelCase , node.mid ) , self._query_range(node.right , node.mid + 1 , __lowerCamelCase ) , ) else: # range in right child tree return self._query_range(node.right , __lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" if self.root is not None: _snake_case = Queue() queue.put(self.root ) while not queue.empty(): _snake_case = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 5_0) snake_case = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
404
1
"""Lazy import structure for the Table Transformer model package."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Config symbols are always importable; modeling symbols only when torch is present.
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load only on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
26
"""Tests for `datasets.download.DownloadManager`: download, extract, and archive/file iteration."""

import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for a `requests` response serving CONTENT."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    """Replacement for `requests.request` returning a canned response."""
    return MockResponse()


@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    """Download single/list/dict URL inputs and verify paths, content, and metadata."""
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # Normalize all three input shapes to parallel lists for comparison.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    """Extract single/list/dict path inputs and verify extracted locations and contents."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content


def _test_jsonl(path, file):
    """Assert `file` is a 4-item JSONL stream with the expected columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2


@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2


def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
26
1
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP: resize, center-crop, rescale, normalize, RGB-convert."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # `size` is a shortest-edge spec, so don't force a square.
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one or more images and return a `BatchFeature`.

        Per-call arguments override the values stored on the processor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
717
# Restored names: the obfuscated chunk reused one placeholder for every class,
# method and parameter (duplicate argument names are a SyntaxError) and left
# call sites (OpenAIGPTModelTester, the mixin bases, self.pad_token_id, ...)
# pointing at names that were never defined. Identifiers below are taken from
# the call sites visible in this same chunk.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    """Builds tiny OpenAIGPT configs/inputs and runs the per-head model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # `prepare_config_and_inputs` reads this as the config's pad token.
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
5
0
# Restored names: the obfuscated chunk gave both classes the same name, gave
# every method the same name (so only the last one survived), and used
# duplicate `_A` parameters (a SyntaxError). `import sqlitea` is the digit-
# mangled `sqlite3`.
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Read a dataset from a SQL query or table through the `Sql` packaged builder."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Materialize the query/table as a `Dataset` (single "train" split)."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Write a `Dataset` to a SQL table in batches, optionally with multiprocessing."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # `sql`/`con` are supplied by this writer, never forwarded to pandas.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice of the dataset; `args` is (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # After the first batch the table exists, so append instead of replace/fail.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Dispatch batches either sequentially or through a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
75
# Restored stale-issue bot: the obfuscated version defined `a__` but called
# `main()`, assigned the label list to a placeholder while reading
# `LABELS_TO_EXEMPT`, and used `lambda lowerCAmelCase__: i.created_at`
# (parameter name not matching the body) with an undefined `reverse` value.
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close or mark as stale the inactive issues of huggingface/transformers."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
75
1
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings SCREAMING_SNAKE_CASE__:Any = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class snake_case__ ( snake_case_ ): _snake_case : bool = field(default=snake_case_, metadata={"""help""": """Whether to use SortishSampler or not."""} ) _snake_case : bool = field( default=snake_case_, metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} ) _snake_case : Optional[int] = field( default=snake_case_, metadata={ """help""": ( """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `max_length` value of the model configuration.""" ) }, ) _snake_case : Optional[int] = field( default=snake_case_, metadata={ """help""": ( """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `num_beams` value of the model configuration.""" ) }, ) _snake_case : Optional[Union[str, Path, GenerationConfig]] = field( default=snake_case_, metadata={ """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.""" }, ) def a__ ( self ): __a = super().to_dict() for k, v in d.items(): if isinstance(lowerCamelCase , lowerCamelCase ): __a = v.to_dict() return d
67
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Any = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE__:Optional[Any] = { """vocab_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""", }, """merges_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""", }, """tokenizer_file""": { """gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""", """gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""", """gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""", """gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""", """distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE__:Union[str, Any] = { """gpt2""": 
1024, """gpt2-medium""": 1024, """gpt2-large""": 1024, """gpt2-xl""": 1024, """distilgpt2""": 1024, } class snake_case__ ( snake_case_ ): _snake_case : Tuple = VOCAB_FILES_NAMES _snake_case : str = PRETRAINED_VOCAB_FILES_MAP _snake_case : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : List[str] = ["""input_ids""", """attention_mask"""] _snake_case : Dict = GPTaTokenizer def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ): super().__init__( lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , ) __a = kwargs.pop("add_bos_token" , lowerCamelCase ) __a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCamelCase ) != add_prefix_space: __a = getattr(lowerCamelCase , pre_tok_state.pop("type" ) ) __a = add_prefix_space __a = pre_tok_class(**lowerCamelCase ) __a = add_prefix_space def a__ ( self , *lowerCamelCase , **lowerCamelCase ): __a = kwargs.get("is_split_into_words" , lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase ) def a__ ( self , *lowerCamelCase , **lowerCamelCase ): __a = kwargs.get("is_split_into_words" , lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowerCamelCase , **lowerCamelCase ) def a__ ( self , lowerCamelCase , lowerCamelCase = None ): __a = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase ) return tuple(lowerCamelCase ) def a__ ( self , lowerCamelCase ): __a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) + [self.eos_token_id] ) if len(lowerCamelCase ) > self.model_max_length: __a = input_ids[-self.model_max_length :] return input_ids
67
1
"""simple docstring""" import glob import os import random from string import ascii_lowercase, digits import cva UpperCamelCase_ : List[Any] = '''''' UpperCamelCase_ : List[str] = '''''' UpperCamelCase_ : List[Any] = '''''' UpperCamelCase_ : Union[str, Any] = 1 # (0 is vertical, 1 is horizontal) def A_ (): '''simple docstring''' A_ , A_ = get_dataset(__a , __a ) print("Processing..." ) A_ , A_ , A_ = update_image_and_anno(__a , __a , __a ) for index, image in enumerate(__a ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' A_ = random_chars(32 ) A_ = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0] A_ = f'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}' cva.imwrite(f'/{file_root}.jpg' , __a , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Success {index+1}/{len(__a )} with {file_name}' ) A_ = [] for anno in new_annos[index]: A_ = f'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}' annos_list.append(__a ) with open(f'/{file_root}.txt' , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def A_ (__a , __a ): '''simple docstring''' A_ = [] A_ = [] for label_file in glob.glob(os.path.join(__a , "*.txt" ) ): A_ = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(__a ) as in_file: A_ = in_file.readlines() A_ = os.path.join(__a , f'{label_name}.jpg' ) A_ = [] for obj_list in obj_lists: A_ = obj_list.rstrip("\n" ).split(" " ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__a ) labels.append(__a ) return img_paths, labels def A_ (__a , __a , __a = 1 ): '''simple docstring''' A_ = [] A_ = [] A_ = [] for idx in range(len(__a ) ): A_ = [] A_ = img_list[idx] path_list.append(__a ) A_ = anno_list[idx] A_ = cva.imread(__a ) if flip_type == 1: A_ = cva.flip(__a , __a ) for bbox in img_annos: A_ = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: A_ = cva.flip(__a , __a ) for bbox in img_annos: A_ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__a ) new_imgs_list.append(__a ) return new_imgs_list, new_annos_lists, path_list def A_ (__a = 32 ): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" A_ = ascii_lowercase + digits return "".join(random.choice(__a ) for _ in range(__a ) ) if __name__ == "__main__": main() print('''DONE ✅''')
115
"""simple docstring""" import argparse import os import re UpperCamelCase_ : Any = '''src/transformers/models/auto''' # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict UpperCamelCase_ : Optional[int] = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''') # re pattern that matches identifiers in mappings UpperCamelCase_ : Tuple = re.compile(R'''\s*\(\s*"(\S[^"]+)"''') def A_ (__a , __a = False ): '''simple docstring''' with open(__a , "r" , encoding="utf-8" ) as f: A_ = f.read() A_ = content.split("\n" ) A_ = [] A_ = 0 while line_idx < len(__a ): if _re_intro_mapping.search(lines[line_idx] ) is not None: A_ = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8 # Start of a new mapping! while not lines[line_idx].startswith(" " * indent + "(" ): new_lines.append(lines[line_idx] ) line_idx += 1 A_ = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": A_ = line_idx while not lines[line_idx].startswith(" " * indent + ")" ): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1] ) ) else: blocks.append(lines[line_idx] ) line_idx += 1 # Sort blocks by their identifiers A_ = sorted(__a , key=lambda __a : _re_identifier.search(__a ).groups()[0] ) new_lines += blocks else: new_lines.append(lines[line_idx] ) line_idx += 1 if overwrite: with open(__a , "w" , encoding="utf-8" ) as f: f.write("\n".join(__a ) ) elif "\n".join(__a ) != content: return True def A_ (__a = False ): '''simple docstring''' A_ = [os.path.join(__a , __a ) for f in os.listdir(__a ) if f.endswith(".py" )] A_ = [sort_auto_mapping(__a , overwrite=__a ) for fname in fnames] if not overwrite and any(__a ): A_ = [f for f, d in zip(__a , __a ) if d] raise ValueError( f'The following files have auto mappings that need sorting: {", ".join(__a )}. Run `make style` to fix' " this." 
) if __name__ == "__main__": UpperCamelCase_ : str = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') UpperCamelCase_ : List[Any] = parser.parse_args() sort_all_auto_mappings(not args.check_only)
115
1
# Restored lazy-module init: the obfuscated chunk assigned every import list to
# the same throwaway name `a` instead of filling `_import_structure[...]`, and
# dropped the `sys.modules[__name__] = ` target, so the lazy module was never
# registered.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
710
'''simple docstring''' from collections.abc import Callable class UpperCamelCase__ : """simple docstring""" def __init__( self , snake_case = None ): '''simple docstring''' UpperCAmelCase : list = [] # Stores indexes of each item for supporting updates and deletion. UpperCAmelCase : dict = {} # Stores current size of heap. UpperCAmelCase : Tuple = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. UpperCAmelCase : Tuple = key or (lambda snake_case : x) def A_ ( self , snake_case ): '''simple docstring''' return int((i - 1) / 2 ) if i > 0 else None def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : List[str] = int(2 * i + 1 ) return left if 0 < left < self.size else None def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = int(2 * i + 2 ) return right if 0 < right < self.size else None def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[Any] = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. 
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.arr[j], self.arr[i] def A_ ( self , snake_case , snake_case ): '''simple docstring''' return self.arr[i][1] < self.arr[j][1] def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : int = self._left(snake_case ) UpperCAmelCase : Any = self._right(snake_case ) UpperCAmelCase : str = i if left is not None and not self._cmp(snake_case , snake_case ): UpperCAmelCase : Optional[Any] = left if right is not None and not self._cmp(snake_case , snake_case ): UpperCAmelCase : Optional[int] = right return valid_parent def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[Any] = self._parent(snake_case ) while parent is not None and not self._cmp(snake_case , snake_case ): self._swap(snake_case , snake_case ) UpperCAmelCase , UpperCAmelCase : Optional[int] = parent, self._parent(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' UpperCAmelCase : Optional[int] = self._get_valid_parent(snake_case ) while valid_parent != index: self._swap(snake_case , snake_case ) UpperCAmelCase , UpperCAmelCase : Optional[int] = valid_parent, self._get_valid_parent(snake_case ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase : Optional[int] = self.pos_map[item] UpperCAmelCase : Dict = [item, self.key(snake_case )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(snake_case ) self._heapify_down(snake_case ) def A_ ( self , snake_case ): '''simple docstring''' if item not in self.pos_map: return UpperCAmelCase : Optional[Any] = self.pos_map[item] del self.pos_map[item] UpperCAmelCase : Dict = self.arr[self.size - 1] UpperCAmelCase : Any = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. 
if self.size > index: self._heapify_up(snake_case ) self._heapify_down(snake_case ) def A_ ( self , snake_case , snake_case ): '''simple docstring''' UpperCAmelCase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(snake_case )] ) else: UpperCAmelCase : Tuple = [item, self.key(snake_case )] UpperCAmelCase : Any = self.size self.size += 1 self._heapify_up(self.size - 1 ) def A_ ( self ): '''simple docstring''' return self.arr[0] if self.size else None def A_ ( self ): '''simple docstring''' UpperCAmelCase : Any = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def lowercase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
609
0
"""Minimal Base64 helpers plus a round-trip demo.

The original block imported a non-existent module ``baseaa`` (a mangled
spelling of ``base64``) and both helpers ignored their own parameters —
one referenced the unbound name ``string`` and the other referenced ``_a``
(the sibling function) — while the demo called ``baseaa_encode`` /
``baseaa_decode``, which were never defined.  Fixed: use the standard
library and define the names the demo actually calls.
"""
import base64


def baseaa_encode(string: str) -> bytes:
    """Return the Base64 encoding of *string* (encoded as UTF-8 first)."""
    return base64.b64encode(string.encode("utf-8"))


def baseaa_decode(encoded: bytes) -> str:
    """Decode Base64 *encoded* bytes back to a UTF-8 string.

    Raises:
        binascii.Error: if *encoded* is not valid Base64.
    """
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
117
# Lazy import structure for the Longformer model family (Hugging Face
# `transformers`-style package __init__): heavy submodules are imported only
# when their optional dependency (tokenizers / torch / tf) is installed, or
# eagerly under TYPE_CHECKING for static analysers.
# NOTE(review): identifiers are machine-mangled — the import-structure dict is
# bound to `SCREAMING_SNAKE_CASE`, yet the final `_LazyModule(...)` call
# references `_import_structure`, which is never defined here; the result is
# also normally assigned to `sys.modules[__name__]`, not to a throwaway name.
# Confirm against the un-mangled original.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# submodule name -> public names it exports (consumed by _LazyModule)
SCREAMING_SNAKE_CASE : List[Any] = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Fast tokenizer is only exposed when the `tokenizers` package is available.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE : Optional[int] = ["LongformerTokenizerFast"]

# PyTorch model classes, gated on torch being installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE : Dict = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

# TensorFlow model classes, gated on tf being installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE : Dict = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; never executed at runtime.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
257
0
'''Toy binary tree whose iterator yields the sum of all node values,
computed by a recursive depth-first traversal.'''
from __future__ import annotations

from collections.abc import Iterator


class _lowerCAmelCase :
    '''A binary-tree node holding a value and left/right children.

    NOTE(review): identifiers are machine-mangled — the assignments below bind
    a throwaway local `_lowercase` instead of `self.value` / `self.left` /
    `self.right`, and `value` is not bound by the mangled parameter
    `UpperCamelCase_`.  Confirm against the un-mangled original.
    '''

    def __init__( self : Dict , UpperCamelCase_ : Dict ) -> List[str]:
        '''Intended: store the node value and set both children to None.'''
        _lowercase : str = value
        _lowercase : List[str] = None
        _lowercase : Tuple = None


class _lowerCAmelCase :
    '''Depth-first tree summer.

    NOTE(review): this second class re-uses the mangled name `_lowerCAmelCase`
    and therefore shadows the node class above at module scope.  The DFS
    method below is mangled to `__lowercase` yet is invoked as
    `self.depth_first_search`, and its body reads `node`, which the mangled
    parameter list no longer binds.  Confirm against the original.
    '''

    def __init__( self : List[str] , UpperCamelCase_ : Dict ) -> List[str]:
        '''Intended: keep a reference to the tree root (self.tree = tree).'''
        _lowercase : List[Any] = tree

    def __lowercase ( self : int , UpperCamelCase_ : str ) -> Union[str, Any]:
        '''Recursively sum node values; an empty subtree contributes 0.'''
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self : List[str] ) -> Optional[int]:
        '''Yield a single value: the DFS sum over the whole tree.'''
        yield self.depth_first_search(self.tree )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
720
'''Unit test for DonutProcessor's token-sequence -> JSON-dict conversion.'''
import unittest

from transformers import DonutProcessor

# Checkpoint whose processor is loaded in the setUp-like method below.
lowerCamelCase__ = 'naver-clova-ix/donut-base'


class _lowerCAmelCase ( unittest.TestCase ):
    '''Tests for DonutProcessor.

    NOTE(review): identifiers are machine-mangled — both methods are named
    `__lowercase` (so the second shadows the first), the first looks like it
    was `setUp` assigning `self.processor`, and `UpperCamelCase_` in the
    `from_pretrained` call is unbound (presumably the checkpoint constant
    above).  Confirm against the un-mangled original.
    '''

    def __lowercase ( self : Tuple ) -> Optional[Any]:
        '''Intended setUp: load the processor for the pretrained checkpoint.'''
        _lowercase : Optional[Any] = DonutProcessor.from_pretrained(UpperCamelCase_ )

    def __lowercase ( self : Tuple ) -> Tuple:
        '''The tag sequence should parse into the expected nested dict.'''
        # expected parse result, including the repeated <s_nickname> group
        _lowercase : str = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        _lowercase : List[str] = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        # `tokenajson` is presumably the mangled spelling of `token2json` — TODO confirm
        _lowercase : str = self.processor.tokenajson(UpperCamelCase_ )
        self.assertDictEqual(UpperCamelCase_ , UpperCamelCase_ )
411
0
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase ) -> tuple[int, int]: """simple docstring""" try: lowerCAmelCase_ : Tuple = float(__UpperCamelCase ) except ValueError: raise ValueError("Please enter a valid number" ) lowerCAmelCase_ : Dict = decimal - int(__UpperCamelCase ) if fractional_part == 0: return int(__UpperCamelCase ), 1 else: lowerCAmelCase_ : Optional[int] = len(str(__UpperCamelCase ).split("." )[1] ) lowerCAmelCase_ : List[Any] = int(decimal * (10**number_of_frac_digits) ) lowerCAmelCase_ : List[str] = 10**number_of_frac_digits lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = denominator, numerator while True: lowerCAmelCase_ : List[Any] = dividend % divisor if remainder == 0: break lowerCAmelCase_ , lowerCAmelCase_ : List[str] = divisor, remainder lowerCAmelCase_ , lowerCAmelCase_ : str = numerator / divisor, denominator / divisor return int(__UpperCamelCase ), int(__UpperCamelCase ) if __name__ == "__main__": print(F"""{decimal_to_fraction(2) = }""") print(F"""{decimal_to_fraction(89.0) = }""") print(F"""{decimal_to_fraction('67') = }""") print(F"""{decimal_to_fraction('45.0') = }""") print(F"""{decimal_to_fraction(1.5) = }""") print(F"""{decimal_to_fraction('6.25') = }""") print(F"""{decimal_to_fraction('78td') = }""")
610
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig lowercase__ = logging.get_logger(__name__) # General docstring lowercase__ = """RegNetConfig""" # Base docstring lowercase__ = """facebook/regnet-y-040""" lowercase__ = [1, 1088, 7, 7] # Image classification docstring lowercase__ = """facebook/regnet-y-040""" lowercase__ = """tabby, tabby cat""" lowercase__ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[str] , a_ : int , a_ : int = 3 , a_ : int = 1 , a_ : int = 1 , a_ : Optional[str] = "relu" , **a_ : Optional[int] , ): super().__init__(**a_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb lowerCAmelCase_ : Tuple = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) lowerCAmelCase_ : str = tf.keras.layers.ConvaD( filters=a_ , kernel_size=a_ , strides=a_ , padding="VALID" , groups=a_ , use_bias=a_ , name="convolution" , ) lowerCAmelCase_ : int = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" ) lowerCAmelCase_ : Any = ACTaFN[activation] if activation is not None else tf.identity def lowerCamelCase ( self : Optional[int] , a_ : Dict ): lowerCAmelCase_ : List[Any] = self.convolution(self.padding(a_ ) ) lowerCAmelCase_ : 
Optional[int] = self.normalization(a_ ) lowerCAmelCase_ : Union[str, Any] = self.activation(a_ ) return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple , a_ : RegNetConfig , **a_ : Optional[int] ): super().__init__(**a_ ) lowerCAmelCase_ : Union[str, Any] = config.num_channels lowerCAmelCase_ : Optional[Any] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , ) def lowerCamelCase ( self : List[str] , a_ : Optional[Any] ): lowerCAmelCase_ : Optional[int] = shape_list(a_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) lowerCAmelCase_ : Any = tf.transpose(a_ , perm=(0, 2, 3, 1) ) lowerCAmelCase_ : Tuple = self.embedder(a_ ) return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : List[Any] , a_ : int , a_ : int = 2 , **a_ : Dict ): super().__init__(**a_ ) lowerCAmelCase_ : List[str] = tf.keras.layers.ConvaD( filters=a_ , kernel_size=1 , strides=a_ , use_bias=a_ , name="convolution" ) lowerCAmelCase_ : Any = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="normalization" ) def lowerCamelCase ( self : Union[str, Any] , a_ : tf.Tensor , a_ : bool = False ): return self.normalization(self.convolution(a_ ) , training=a_ ) class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Union[str, Any] , a_ : int , a_ : int , **a_ : List[Any] ): super().__init__(**a_ ) lowerCAmelCase_ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name="pooler" ) lowerCAmelCase_ : 
str = [ tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation="relu" , name="attention.0" ), tf.keras.layers.ConvaD(filters=a_ , kernel_size=1 , activation="sigmoid" , name="attention.2" ), ] def lowerCamelCase ( self : str , a_ : str ): # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels] lowerCAmelCase_ : Optional[Any] = self.pooler(a_ ) for layer_module in self.attention: lowerCAmelCase_ : Union[str, Any] = layer_module(a_ ) lowerCAmelCase_ : Optional[int] = hidden_state * pooled return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Tuple , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 1 , **a_ : Any ): super().__init__(**a_ ) lowerCAmelCase_ : Union[str, Any] = in_channels != out_channels or stride != 1 lowerCAmelCase_ : Tuple = max(1 , out_channels // config.groups_width ) lowerCAmelCase_ : Union[str, Any] = ( TFRegNetShortCut(a_ , stride=a_ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
lowerCAmelCase_ : str = [ TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name="layer.1" ), TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name="layer.2" ), ] lowerCAmelCase_ : str = ACTaFN[config.hidden_act] def lowerCamelCase ( self : str , a_ : Optional[int] ): lowerCAmelCase_ : List[Any] = hidden_state for layer_module in self.layers: lowerCAmelCase_ : List[str] = layer_module(a_ ) lowerCAmelCase_ : Any = self.shortcut(a_ ) hidden_state += residual lowerCAmelCase_ : Optional[int] = self.activation(a_ ) return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : Dict , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 1 , **a_ : Union[str, Any] ): super().__init__(**a_ ) lowerCAmelCase_ : Optional[Any] = in_channels != out_channels or stride != 1 lowerCAmelCase_ : Dict = max(1 , out_channels // config.groups_width ) lowerCAmelCase_ : List[Any] = ( TFRegNetShortCut(a_ , stride=a_ , name="shortcut" ) if should_apply_shortcut else tf.keras.layers.Activation("linear" , name="shortcut" ) ) lowerCAmelCase_ : int = [ TFRegNetConvLayer(a_ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ), TFRegNetConvLayer( a_ , stride=a_ , groups=a_ , activation=config.hidden_act , name="layer.1" ), TFRegNetSELayer(a_ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ), TFRegNetConvLayer(a_ , kernel_size=1 , activation=a_ , name="layer.3" ), ] lowerCAmelCase_ : Optional[int] = ACTaFN[config.hidden_act] def lowerCamelCase ( self : List[Any] , a_ : Union[str, Any] ): lowerCAmelCase_ : str = hidden_state for layer_module in self.layers: lowerCAmelCase_ : Tuple = layer_module(a_ ) lowerCAmelCase_ : Union[str, Any] = self.shortcut(a_ ) hidden_state += residual lowerCAmelCase_ : Tuple = self.activation(a_ ) return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): 
'''simple docstring''' def __init__( self : Optional[Any] , a_ : RegNetConfig , a_ : int , a_ : int , a_ : int = 2 , a_ : int = 2 , **a_ : Optional[Any] ): super().__init__(**a_ ) lowerCAmelCase_ : Optional[int] = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer lowerCAmelCase_ : Optional[Any] = [ # downsampling is done in the first layer with stride of 2 layer(a_ , a_ , a_ , stride=a_ , name="layers.0" ), *[layer(a_ , a_ , a_ , name=f'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCamelCase ( self : Dict , a_ : Any ): for layer_module in self.layers: lowerCAmelCase_ : Any = layer_module(a_ ) return hidden_state class __lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , a_ : RegNetConfig , **a_ : List[str] ): super().__init__(**a_ ) lowerCAmelCase_ : Tuple = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( a_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) ) lowerCAmelCase_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(a_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(a_ , a_ , a_ , depth=a_ , name=f'''stages.{i+1}''' ) ) def lowerCamelCase ( self : str , a_ : tf.Tensor , a_ : bool = False , a_ : bool = True ): lowerCAmelCase_ : int = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCAmelCase_ : List[Any] = hidden_states + (hidden_state,) lowerCAmelCase_ : str = stage_module(a_ ) if output_hidden_states: lowerCAmelCase_ : Tuple = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=a_ , hidden_states=a_ ) @keras_serializable class 
__lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' a_ : Any = RegNetConfig def __init__( self : Dict , a_ : Union[str, Any] , **a_ : Any ): super().__init__(**a_ ) lowerCAmelCase_ : List[Any] = config lowerCAmelCase_ : Optional[Any] = TFRegNetEmbeddings(a_ , name="embedder" ) lowerCAmelCase_ : List[str] = TFRegNetEncoder(a_ , name="encoder" ) lowerCAmelCase_ : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a_ , name="pooler" ) @unpack_inputs def lowerCamelCase ( self : Dict , a_ : tf.Tensor , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , ): lowerCAmelCase_ : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase_ : int = self.embedder(a_ , training=a_ ) lowerCAmelCase_ : Optional[int] = self.encoder( a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ ) lowerCAmelCase_ : str = encoder_outputs[0] lowerCAmelCase_ : str = self.pooler(a_ ) # Change to NCHW output format have uniformity in the modules lowerCAmelCase_ : int = tf.transpose(a_ , perm=(0, 3, 1, 2) ) lowerCAmelCase_ : List[str] = tf.transpose(a_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: lowerCAmelCase_ : Optional[int] = tuple([tf.transpose(a_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=a_ , pooler_output=a_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : List[str] = RegNetConfig a_ : Optional[Any] = """regnet""" a_ : List[str] = """pixel_values""" @property def lowerCamelCase ( self : Union[str, Any] ): return {"pixel_values": tf.TensorSpec(shape=(None, 
self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )} lowercase__ = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ lowercase__ = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( """The bare RegNet model outputting raw features without any specific head on top.""" , A__ , ) class __lowerCamelCase ( A__ ): '''simple docstring''' def __init__( self : List[str] , a_ : RegNetConfig , *a_ : Any , **a_ : Dict ): super().__init__(a_ , *a_ , **a_ ) lowerCAmelCase_ : List[str] = TFRegNetMainLayer(a_ , name="regnet" ) @unpack_inputs @add_start_docstrings_to_model_forward(a_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=a_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCamelCase ( self : Tuple , a_ : tf.Tensor , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : Any=False , ): lowerCAmelCase_ : List[Any] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase_ : str = self.regnet( pixel_values=a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( """ RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
""" , A__ , ) class __lowerCamelCase ( A__ , A__ ): '''simple docstring''' def __init__( self : Optional[Any] , a_ : RegNetConfig , *a_ : Tuple , **a_ : int ): super().__init__(a_ , *a_ , **a_ ) lowerCAmelCase_ : List[Any] = config.num_labels lowerCAmelCase_ : List[Any] = TFRegNetMainLayer(a_ , name="regnet" ) # classification head lowerCAmelCase_ : str = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(a_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCamelCase ( self : Tuple , a_ : tf.Tensor = None , a_ : tf.Tensor = None , a_ : bool = None , a_ : bool = None , a_ : int=False , ): lowerCAmelCase_ : Dict = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCAmelCase_ : str = return_dict if return_dict is not None else self.config.use_return_dict lowerCAmelCase_ : List[str] = self.regnet( a_ , output_hidden_states=a_ , return_dict=a_ , training=a_ ) lowerCAmelCase_ : Dict = outputs.pooler_output if return_dict else outputs[1] lowerCAmelCase_ : Tuple = self.classifier[0](a_ ) lowerCAmelCase_ : str = self.classifier[1](a_ ) lowerCAmelCase_ : List[str] = None if labels is None else self.hf_compute_loss(labels=a_ , logits=a_ ) if not return_dict: lowerCAmelCase_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=a_ , logits=a_ , hidden_states=outputs.hidden_states )
610
1
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger("transformers.models.speecht5") __lowerCAmelCase : List[Any] = { "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm", "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection", "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv", "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed", } __lowerCAmelCase : Optional[int] = { "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens", "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha", } __lowerCAmelCase : List[Any] = { "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0", "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1", "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer", "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha", "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer", } __lowerCAmelCase : Optional[int] = { "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out", "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out", "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv", "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm", "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv", "speech_decoder_postnet.postnet.postnet.1.1": 
"speech_decoder_postnet.layers.1.batch_norm", "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv", "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm", "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv", "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm", "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv", "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm", } __lowerCAmelCase : int = { "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens", } __lowerCAmelCase : Any = { "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head", } __lowerCAmelCase : Dict = { "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj", "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj", "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj", "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj", "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm", "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense", "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense", "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm", "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm", "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k", } __lowerCAmelCase : List[Any] = { "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj", "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj", 
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj", "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj", "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm", "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj", "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj", "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj", "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj", "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm", "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense", "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense", "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm", } __lowerCAmelCase : List[str] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __lowerCAmelCase : int = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __lowerCAmelCase : Optional[Any] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __lowerCAmelCase : List[Any] = [] __lowerCAmelCase : Dict = [ "encoder.version", "encoder.layers.*.norm_k.weight", "encoder.layers.*.norm_k.bias", "decoder.version", "decoder.layers.*.norm_k.weight", "decoder.layers.*.norm_k.bias", "decoder.pos_emb.pe_k", "speech_encoder_prenet.embed_positions._float_tensor", 
"text_decoder_prenet.embed_positions._float_tensor", ] __lowerCAmelCase : Tuple = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "speech_decoder_prenet.*", "speech_decoder_postnet.*", ] __lowerCAmelCase : Union[str, Any] = IGNORE_KEYS + [ "encoder.proj", "speech_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] __lowerCAmelCase : Dict = IGNORE_KEYS + [ "encoder.proj", "text_encoder_prenet.*", "text_decoder_prenet.*", "text_decoder_postnet.*", ] def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" for attribute in key.split(""".""" ): lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ) if weight_type is not None: lowerCAmelCase__ = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape else: lowerCAmelCase__ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowerCAmelCase__ = value elif weight_type == "weight_g": lowerCAmelCase__ = value elif weight_type == "weight_v": lowerCAmelCase__ = value elif weight_type == "bias": lowerCAmelCase__ = value elif weight_type == "running_mean": lowerCAmelCase__ = value elif weight_type == "running_var": lowerCAmelCase__ = value elif weight_type == "num_batches_tracked": lowerCAmelCase__ = value else: lowerCAmelCase__ = value logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." 
in key: lowerCAmelCase__ , lowerCAmelCase__ = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = [] if task == "s2t": lowerCAmelCase__ = hf_model.speechta.encoder.prenet.feature_encoder lowerCAmelCase__ = MAPPING_S2T lowerCAmelCase__ = IGNORE_KEYS_S2T elif task == "t2s": lowerCAmelCase__ = None lowerCAmelCase__ = MAPPING_T2S lowerCAmelCase__ = IGNORE_KEYS_T2S elif task == "s2s": lowerCAmelCase__ = hf_model.speechta.encoder.prenet.feature_encoder lowerCAmelCase__ = MAPPING_S2S lowerCAmelCase__ = IGNORE_KEYS_S2S else: raise ValueError(f"""Unsupported task: {task}""" ) for name, value in fairseq_dict.items(): if should_ignore(lowerCamelCase__ , lowerCamelCase__ ): logger.info(f"""{name} was ignored""" ) continue lowerCAmelCase__ = False if "conv_layers" in name: load_conv_layer( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , ) lowerCAmelCase__ = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: lowerCAmelCase__ , lowerCAmelCase__ = key.split(""".*.""" ) if prefix in name and suffix in name: lowerCAmelCase__ = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: lowerCAmelCase__ = True if "*" in mapped_key: lowerCAmelCase__ = name.split(lowerCamelCase__ )[0].split(""".""" )[-2] lowerCAmelCase__ = mapped_key.replace("""*""" , lowerCamelCase__ ) if "weight_g" in name: lowerCAmelCase__ = """weight_g""" elif "weight_v" in name: lowerCAmelCase__ = """weight_v""" elif "bias" in name: lowerCAmelCase__ = """bias""" elif "weight" in name: lowerCAmelCase__ = """weight""" elif "running_mean" in name: lowerCAmelCase__ = """running_mean""" elif "running_var" in name: lowerCAmelCase__ = """running_var""" elif "num_batches_tracked" in name: lowerCAmelCase__ = """num_batches_tracked""" else: lowerCAmelCase__ = None set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) continue if not is_used: unused_weights.append(lowerCamelCase__ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowerCAmelCase__ = full_name.split("""conv_layers.""" )[-1] lowerCAmelCase__ = name.split(""".""" ) lowerCAmelCase__ = int(items[0] ) lowerCAmelCase__ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( 
f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) lowerCAmelCase__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCamelCase__ ) @torch.no_grad() def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ): """simple docstring""" if config_path is not None: lowerCAmelCase__ = SpeechTaConfig.from_pretrained(lowerCamelCase__ ) else: lowerCAmelCase__ = SpeechTaConfig() if task == "s2t": lowerCAmelCase__ = config.max_text_positions lowerCAmelCase__ = SpeechTaForSpeechToText(lowerCamelCase__ ) elif task == "t2s": lowerCAmelCase__ = 1876 lowerCAmelCase__ = 600 lowerCAmelCase__ = config.max_speech_positions lowerCAmelCase__ = SpeechTaForTextToSpeech(lowerCamelCase__ ) elif task == "s2s": lowerCAmelCase__ = 1876 lowerCAmelCase__ = config.max_speech_positions lowerCAmelCase__ = SpeechTaForSpeechToSpeech(lowerCamelCase__ ) else: raise 
ValueError(f"""Unknown task name: {task}""" ) if vocab_path: lowerCAmelCase__ = SpeechTaTokenizer(lowerCamelCase__ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it lowerCAmelCase__ = AddedToken("""<mask>""" , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) lowerCAmelCase__ = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) lowerCAmelCase__ = SpeechTaFeatureExtractor() lowerCAmelCase__ = SpeechTaProcessor(tokenizer=lowerCamelCase__ , feature_extractor=lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) lowerCAmelCase__ = torch.load(lowerCamelCase__ ) recursively_load_weights(fairseq_checkpoint["""model"""] , lowerCamelCase__ , lowerCamelCase__ ) model.save_pretrained(lowerCamelCase__ ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(lowerCamelCase__ ) model.push_to_hub(lowerCamelCase__ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "--task", default="s2t", type=str, help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.", ) parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model." ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." ) __lowerCAmelCase : Any = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
674
"""simple docstring""" import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model") @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : Tuple = PegasusTokenizer UpperCamelCase_ : Any = PegasusTokenizerFast UpperCamelCase_ : int = True UpperCamelCase_ : Any = True def _SCREAMING_SNAKE_CASE ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return PegasusTokenizer.from_pretrained("""google/pegasus-large""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , snake_case__ : Optional[Any] ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): lowerCAmelCase__ = """</s>""" lowerCAmelCase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case__ ) , snake_case__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case__ ) , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """</s>""" ) self.assertEqual(vocab_keys[-1] , """v""" ) self.assertEqual(len(snake_case__ ) , 1103 ) def _SCREAMING_SNAKE_CASE ( self : Any ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def _SCREAMING_SNAKE_CASE ( self : 
Dict ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important""" """ </s> <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ): lowerCAmelCase__ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ = """<mask_1> To ensure a <mask_2> flow of bank resolutions.""" lowerCAmelCase__ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : int ): lowerCAmelCase__ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ = """To ensure a smooth flow of bank resolutions.""" lowerCAmelCase__ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] lowerCAmelCase__ = tokenizer([raw_input_str] , return_tensors=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = 
["""This is going to be way too long.""" * 150, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. @slow def _SCREAMING_SNAKE_CASE ( self : str ): # fmt: off lowerCAmelCase__ = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=snake_case__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , ) @require_sentencepiece @require_tokenizers class a_ ( __UpperCamelCase , unittest.TestCase ): UpperCamelCase_ : str = PegasusTokenizer UpperCamelCase_ : Optional[int] = PegasusTokenizerFast UpperCamelCase_ : Union[str, Any] = True UpperCamelCase_ : Optional[int] = True def _SCREAMING_SNAKE_CASE ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ = PegasusTokenizer(snake_case__ , offset=0 , mask_token_sent=snake_case__ , mask_token="""[MASK]""" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ): return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **snake_case__ : List[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Dict ): return ("This is a test", "This is a test") def _SCREAMING_SNAKE_CASE ( self : List[str] ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ = ( """Let's see which 
<unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>""" """ <pad> <pad> <pad>""" ) lowerCAmelCase__ = rust_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] lowerCAmelCase__ = py_tokenizer([raw_input_str] , return_tensors=snake_case__ , add_special_tokens=snake_case__ ).input_ids[0] self.assertListEqual(snake_case__ , snake_case__ ) @require_torch def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ["""This is going to be way too long.""" * 1000, """short example"""] lowerCAmelCase__ = ["""not super long but more than 5 tokens""", """tiny"""] lowerCAmelCase__ = self._large_tokenizer(snake_case__ , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) lowerCAmelCase__ = self._large_tokenizer( text_target=snake_case__ , max_length=5 , padding=snake_case__ , truncation=snake_case__ , return_tensors="""pt""" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(snake_case__ ) == 2 # input_ids, attention_mask. def _SCREAMING_SNAKE_CASE ( self : List[Any] ): lowerCAmelCase__ = ( """This is an example string that is used to test the original TF implementation against the HF""" """ implementation""" ) lowerCAmelCase__ = self._large_tokenizer(snake_case__ ).input_ids self.assertListEqual( snake_case__ , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
674
1
import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowerCamelCase__ ( _A , _A , _A ): '''simple docstring''' snake_case_ = 1.5 snake_case_ = int(factor * num_class_images ) snake_case_ = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 ) os.makedirs(f"{class_data_dir}/images" , exist_ok=a__ ) if len(list(Path(f"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images: return while True: snake_case_ = client.query(text=a__ ) if len(a__ ) >= factor * num_class_images or num_images > 1E4: break else: snake_case_ = int(factor * num_images ) snake_case_ = ClipClient( url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=a__ , aesthetic_weight=0.1 , ) snake_case_ = 0 snake_case_ = 0 snake_case_ = tqdm(desc="downloading real regularization images" , total=a__ ) with open(f"{class_data_dir}/caption.txt" , "w" ) as fa, open(f"{class_data_dir}/urls.txt" , "w" ) as fa, open( f"{class_data_dir}/images.txt" , "w" ) as fa: while total < num_class_images: snake_case_ = class_images[count] count += 1 try: snake_case_ = requests.get(images["url"] ) if img.status_code == 200: snake_case_ = Image.open(BytesIO(img.content ) ) with open(f"{class_data_dir}/images/{total}.jpg" , "wb" ) as f: f.write(img.content ) fa.write(images["caption"] + "\n" ) fa.write(images["url"] + "\n" ) fa.write(f"{class_data_dir}/images/{total}.jpg" + "\n" ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowerCamelCase__ ( ): '''simple docstring''' snake_case_ = argparse.ArgumentParser("" , add_help=a__ ) parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=a__ , type=a__ ) parser.add_argument("--class_data_dir" , help="path to save images" , required=a__ , type=a__ ) parser.add_argument("--num_class_images" , 
help="number of images to download" , default=200 , type=a__ ) return parser.parse_args() if __name__ == "__main__": lowercase__ : Union[str, Any] = parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
376
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = data def __iter__( self : Any ) -> Dict: """simple docstring""" for element in self.data: yield element def a__ ( a__=True ): """simple docstring""" __SCREAMING_SNAKE_CASE = Accelerator(even_batches=a__ ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def a__ ( a__ , a__ , a__ , a__ = False ): """simple docstring""" if iterable: __SCREAMING_SNAKE_CASE = DummyIterableDataset(torch.as_tensor(range(a__ ) ) ) else: __SCREAMING_SNAKE_CASE = TensorDataset(torch.as_tensor(range(a__ ) ) ) __SCREAMING_SNAKE_CASE = DataLoader(a__ , batch_size=a__ ) __SCREAMING_SNAKE_CASE = accelerator.prepare(a__ ) return dl def a__ ( a__ , a__ , a__ , a__ , a__ , ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_dataloader(accelerator=a__ , dataset_size=a__ , batch_size=a__ ) __SCREAMING_SNAKE_CASE = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert 
batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( a__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( a__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_accelerator(even_batches=a__ ) verify_dataloader_batch_sizes( a__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( a__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_accelerator(even_batches=a__ ) __SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) __SCREAMING_SNAKE_CASE = accelerator.prepare(a__ ) __SCREAMING_SNAKE_CASE = create_dataloader(a__ , dataset_size=3 , batch_size=1 ) __SCREAMING_SNAKE_CASE = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(a__ ): __SCREAMING_SNAKE_CASE = ddp_model(batch[0].float() ) __SCREAMING_SNAKE_CASE = output.sum() loss.backward() batch_idxs.append(a__ ) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def a__ ( a__ ): """simple docstring""" with warnings.catch_warnings(record=a__ ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , a__ ) assert "only supported for multi-GPU" in str(w[-1].message ) 
def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = create_accelerator(even_batches=a__ ) __SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) __SCREAMING_SNAKE_CASE = accelerator.prepare(a__ ) __SCREAMING_SNAKE_CASE = create_dataloader(a__ , dataset_size=3 , batch_size=1 ) __SCREAMING_SNAKE_CASE = create_dataloader(a__ , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ): __SCREAMING_SNAKE_CASE = train_dl.batch_sampler.even_batches __SCREAMING_SNAKE_CASE = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = create_accelerator(even_batches=a__ ) __SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) __SCREAMING_SNAKE_CASE = accelerator.prepare(a__ ) create_dataloader(a__ , dataset_size=3 , batch_size=1 , iterable=a__ ) __SCREAMING_SNAKE_CASE = create_dataloader(a__ , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("""ignore""" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ): __SCREAMING_SNAKE_CASE = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_accelerator() __SCREAMING_SNAKE_CASE = torch.nn.Linear(1 , 1 ) __SCREAMING_SNAKE_CASE = accelerator.prepare(a__ ) create_dataloader(a__ , dataset_size=3 , batch_size=1 , iterable=a__ ) with 
warnings.catch_warnings(record=a__ ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=a__ ): pass assert issubclass(w[-1].category , a__ ) assert "only supported for map-style datasets" in str(w[-1].message ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = create_accelerator() accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" ) test_default_ensures_even_batch_sizes() accelerator.print("""Run tests with even_batches disabled""" ) test_can_disable_even_batches() accelerator.print("""Test joining uneven inputs""" ) test_can_join_uneven_inputs() accelerator.print("""Test overriding even_batches when joining uneven inputs""" ) test_join_can_override_even_batches() accelerator.print("""Test overriding even_batches for mixed dataloader types""" ) test_join_can_override_for_mixed_type_dataloaders() accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("""Test join with non DDP distributed raises warning""" ) __SCREAMING_SNAKE_CASE = accelerator.state.distributed_type __SCREAMING_SNAKE_CASE = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(a__ ) __SCREAMING_SNAKE_CASE = original_state if __name__ == "__main__": main()
627
0
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters __snake_case :Any = (720, 1280) # Height, Width __snake_case :Optional[int] = (0.4, 0.6) # if height or width lower than this scale, drop it. __snake_case :int = 1 / 100 __snake_case :int = '''''' __snake_case :str = '''''' __snake_case :int = '''''' __snake_case :List[str] = 250 def __snake_case ( ): __a , __a = get_dataset(_UpperCAmelCase , _UpperCAmelCase ) for index in range(_UpperCAmelCase ): __a = random.sample(range(len(_UpperCAmelCase ) ) , 4 ) __a , __a , __a = update_image_and_anno( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , filter_scale=_UpperCAmelCase , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __a = random_chars(32 ) __a = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0] __a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}' cva.imwrite(f'{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' ) __a = [] for anno in new_annos: __a = anno[3] - anno[1] __a = anno[4] - anno[2] __a = anno[1] + width / 2 __a = anno[2] + height / 2 __a = f'{anno[0]} {x_center} {y_center} {width} {height}' annos_list.append(_UpperCAmelCase ) with open(f'{file_root}.txt' , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): __a = [] __a = [] for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ): __a = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(_UpperCAmelCase ) as in_file: __a = in_file.readlines() __a = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' ) __a = [] for obj_list in obj_lists: __a = obj_list.rstrip('''\n''' ).split(''' ''' ) __a = float(obj[1] ) - float(obj[3] ) / 2 __a = float(obj[2] ) - float(obj[4] ) / 2 __a = float(obj[1] ) + float(obj[3] ) / 2 __a = float(obj[2] ) + 
float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(_UpperCAmelCase ) labels.append(_UpperCAmelCase ) return img_paths, labels def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0 , ): __a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __a = int(scale_x * output_size[1] ) __a = int(scale_y * output_size[0] ) __a = [] __a = [] for i, index in enumerate(_UpperCAmelCase ): __a = all_img_list[index] path_list.append(_UpperCAmelCase ) __a = all_annos[index] __a = cva.imread(_UpperCAmelCase ) if i == 0: # top-left __a = cva.resize(_UpperCAmelCase , (divid_point_x, divid_point_y) ) __a = img for bbox in img_annos: __a = bbox[1] * scale_x __a = bbox[2] * scale_y __a = bbox[3] * scale_x __a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __a = cva.resize(_UpperCAmelCase , (output_size[1] - divid_point_x, divid_point_y) ) __a = img for bbox in img_annos: __a = scale_x + bbox[1] * (1 - scale_x) __a = bbox[2] * scale_y __a = scale_x + bbox[3] * (1 - scale_x) __a = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __a = cva.resize(_UpperCAmelCase , (divid_point_x, output_size[0] - divid_point_y) ) __a = img for bbox in img_annos: __a = bbox[1] * scale_x __a = scale_y + bbox[2] * (1 - scale_y) __a = bbox[3] * scale_x __a = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __a = cva.resize( _UpperCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __a = img for bbox in img_annos: __a = scale_x + bbox[1] * (1 - scale_x) __a = scale_y + bbox[2] * (1 - scale_y) __a = scale_x + bbox[3] * (1 - scale_x) __a 
= scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __a = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def __snake_case ( _UpperCAmelCase ): assert number_char > 1, "The number of character should greater than 1" __a = ascii_lowercase + digits return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) ) if __name__ == "__main__": main() print('''DONE ✅''')
60
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __snake_case :List[str] = HfApi() __snake_case :str = {} # fmt: off __snake_case :Optional[Any] = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __snake_case :Union[str, Any] = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __snake_case :str = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __snake_case :List[Any] = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __snake_case :Any = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 
0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __snake_case :List[str] = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __snake_case :Optional[int] = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __snake_case :Tuple = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __snake_case :List[Any] = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __snake_case :Optional[Any] = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, 
-0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __snake_case :Optional[Any] = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __snake_case :List[str] = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __snake_case :Any = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __snake_case :List[str] = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __snake_case :Union[str, Any] = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 
]) # fmt: on __snake_case :List[Any] = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith('''CompVis'''): __snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __snake_case :str = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __snake_case :List[Any] = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __snake_case :Any = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
60
1
'''simple docstring''' import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class __magic_name__ : def __init__( self : Optional[int] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Optional[Any]=13 ,_UpperCAmelCase : Any=2 ,_UpperCAmelCase : Union[str, Any]=24 ,_UpperCAmelCase : List[Any]=16 ,_UpperCAmelCase : str=True ,_UpperCAmelCase : Tuple=True ,_UpperCAmelCase : Union[str, Any]=32 ,_UpperCAmelCase : Optional[int]=5 ,_UpperCAmelCase : Any=4 ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Union[str, Any]="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : List[str]=0.1 ,_UpperCAmelCase : Tuple=10 ,_UpperCAmelCase : int=0.02 ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : str=2 ,): _a : Optional[Any] = parent _a : Optional[int] = batch_size _a : Optional[Any] = patch_size _a : Optional[int] = max_length _a : Optional[int] = num_mel_bins _a : Union[str, Any] = is_training _a : List[Any] = use_labels _a : Tuple = hidden_size _a : int = num_hidden_layers _a : Tuple = num_attention_heads _a : Union[str, Any] = intermediate_size _a : List[str] = hidden_act _a : Any = hidden_dropout_prob _a : Tuple = attention_probs_dropout_prob _a : 
List[str] = type_sequence_label_size _a : str = initializer_range _a : List[Any] = scope _a : Optional[int] = frequency_stride _a : Dict = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _a : Dict = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _a : Optional[int] = (self.max_length - self.patch_size) // self.time_stride + 1 _a : Union[str, Any] = frequency_out_dimension * time_out_dimension _a : List[Any] = num_patches + 2 def __lowercase ( self : List[str] ): _a : Union[str, Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) _a : List[str] = None if self.use_labels: _a : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _a : int = self.get_config() return config, input_values, labels def __lowercase ( self : Dict ): return ASTConfig( patch_size=self.patch_size ,max_length=self.max_length ,num_mel_bins=self.num_mel_bins ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__A ,initializer_range=self.initializer_range ,frequency_stride=self.frequency_stride ,time_stride=self.time_stride ,) def __lowercase ( self : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ): _a : str = ASTModel(config=__A ) model.to(__A ) model.eval() _a : Optional[int] = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self : str ): _a : List[str] = self.prepare_config_and_inputs() ( ( _a ) , ( _a ) , ( _a ) , ) : int = config_and_inputs _a : Optional[Any] = {'input_values': input_values} return config, inputs_dict @require_torch class __magic_name__ ( 
UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): lowerCAmelCase : List[Any] = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowerCAmelCase : List[Any] = ( {"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel} if is_torch_available() else {} ) lowerCAmelCase : Optional[int] = False lowerCAmelCase : Tuple = False lowerCAmelCase : Union[str, Any] = False lowerCAmelCase : str = False def __lowercase ( self : Dict ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any] ): if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def __lowercase ( self : List[Any] ): _a : Tuple = ASTModelTester(self ) _a : Union[str, Any] = ConfigTester(self ,config_class=__A ,has_text_modality=__A ,hidden_size=37 ) def __lowercase ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='AST does not use inputs_embeds' ) def __lowercase ( self : Optional[int] ): pass def __lowercase ( self : Optional[Any] ): _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Optional[Any] = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _a : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A ,nn.Linear ) ) def __lowercase ( self : Union[str, Any] ): _a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : Any = model_class(__A ) _a : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : List[Any] = [*signature.parameters.keys()] _a : Dict = ['input_values'] self.assertListEqual(arg_names[:1] ,__A ) def __lowercase ( self : Optional[Any] ): _a : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) @slow def __lowercase ( self : Optional[Any] ): for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : int = ASTModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __lowerCamelCase ( ) -> str: _a : Any = hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' ) _a , _a : List[Any] = torchaudio.load(_lowerCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class __magic_name__ ( unittest.TestCase ): @cached_property def __lowercase ( self : Optional[Any] ): return ( ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ) if is_torchaudio_available() else None ) @slow def __lowercase ( self : Union[str, Any] ): _a : Optional[Any] = self.default_feature_extractor _a : str = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(__A ) _a : Optional[Any] = self.default_feature_extractor _a , _a : Tuple = prepare_audio() _a : Any = audio.squeeze().numpy() _a : Optional[int] = feature_extractor(__A ,sampling_rate=__A ,return_tensors='pt' ).to(__A ) # forward pass with torch.no_grad(): _a : Union[str, Any] = model(**__A ) # verify the logits _a : str = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape ,__A ) _a : Dict = torch.tensor([-0.87_60, -7.00_42, -8.66_02] ).to(__A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__A ,atol=1E-4 ) )
358
"""Sentence splitting helper for PEGASUS-style ROUGE evaluation."""
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # FileLock guards against concurrent workers racing on the nltk download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def _lowerCAmelCase(x: str) -> str:
    """Return *x* with one sentence per line (newline-separated).

    This matches the sentence-per-line format expected when computing
    rougeLsum so scores line up with published rougeL numbers.

    Raises:
        AssertionError: if nltk is not installed.
    """
    # Bug fix: re.sub returns a new string; the original discarded the result,
    # so the pegasus "<n>" newline marker was never actually removed.
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
126
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # Keep the name defined so `slow_tokenizer_class` below always resolves.
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class lowerCAmelCase_(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by HuggingFace's *tokenizers* library.

    Wraps a SentencePiece-derived vocabulary; falls back to the slow
    ``FNetTokenizer`` class for conversion when only ``vocab_file`` is given.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # The mask token behaves like a normal word: it absorbs the preceding
        # space (lstrip) but not the following one.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Without the SentencePiece model file we cannot export a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs from sequence(s): ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece vocab file into *save_directory* and return its path."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
719
from ...processing_utils import ProcessorMixin


class lowerCAmelCase_(ProcessorMixin):
    """Wraps a Whisper feature extractor and tokenizer into a single processor.

    ``__call__`` dispatches audio to the feature extractor and text to the
    tokenizer; decoding methods forward to the tokenizer.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__ when inside as_target_processor-style contexts.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's decoder-prompt-id helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Process ``audio`` and/or ``text`` inputs.

        Returns feature-extractor output for audio-only calls, tokenizer
        output for text-only calls, and for combined calls the audio features
        with tokenized text attached under ``"labels"``.

        Raises:
            ValueError: if neither ``audio`` nor ``text`` is supplied.
        """
        # For backwards compatibility: delegate everything while in a target context.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to :meth:`WhisperTokenizer.batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to :meth:`WhisperTokenizer.decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        """Forward to the tokenizer's prompt-id helper."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
416
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration for RWKV models.

    Stores the hyperparameters needed to instantiate an RWKV model; all
    arguments default to the RWKV-4 7B ("pile") configuration.
    """

    model_type = "rwkv"
    # RWKV calls the sequence-length limit `context_length`; expose it under
    # the library-wide canonical name as well.
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both derived sizes default relative to `hidden_size` when not given.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
57
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


logger = logging.get_logger(__name__)


class __lowercase(DeiTImageProcessor):
    """Deprecated alias for :class:`DeiTImageProcessor`.

    Kept for backwards compatibility; emits a ``FutureWarning`` on
    construction and otherwise behaves exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Bug fix: the warning category must be a Warning subclass; the
        # original passed the *args tuple here, which raises TypeError.
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
637
0
"""Decision Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class _UpperCAmelCase(PretrainedConfig):
    """Configuration for Decision Transformer models.

    The model reuses a GPT-2 style backbone, so most hyperparameters mirror
    the GPT-2 configuration (``n_layer``, ``n_head``, ``n_inner``, ...), with
    RL-specific additions (``state_dim``, ``act_dim``, ``max_ep_len``,
    ``action_tanh``).
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map library-canonical names onto the GPT-2 style attribute names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
715
"""simple docstring""" from __future__ import annotations import os from typing import Any import requests __A : List[Any] = '''https://api.github.com''' # https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user __A : Dict = BASE_URL + '''/user''' # https://github.com/settings/tokens __A : str = os.environ.get('''USER_TOKEN''', '''''') def lowercase ( __snake_case : str ): lowercase_ : Tuple = { '''Authorization''': F'''token {auth_token}''', '''Accept''': '''application/vnd.github.v3+json''', } return requests.get(__snake_case , headers=__snake_case ).json() if __name__ == "__main__": # pragma: no cover if USER_TOKEN: for key, value in fetch_github_info(USER_TOKEN).items(): print(F"""{key}: {value}""") else: raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
141
0
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    """Builds DetaImageProcessor kwargs and the output sizes the tests expect."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        # None-sentinels instead of mutable list defaults.
        image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs dict used to construct the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should produce.

        Mirrors shortest-edge resizing; for batches, takes the per-image
        expected sizes and returns the elementwise maxima (pad-to-largest).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width)
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Functional tests for DetaImageProcessor (PIL / numpy / torch inputs + COCO targets)."""

    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
61
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : """simple docstring""" _lowercase : List[str] = PegasusConfig _lowercase : Union[str, Any] = {} _lowercase : Tuple = '''gelu''' def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ): '''simple docstring''' snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def __magic_name__ ( self : Optional[Any]): '''simple docstring''' snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) snake_case__ = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return config, inputs_dict def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder() snake_case__ = inputs_dict["""input_ids"""] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict["""attention_mask"""][:1, :] snake_case__ = inputs_dict["""head_mask"""] snake_case__ = 1 # first forward pass snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1) snake_case__ = model(UpperCamelCase__ , 
attention_mask=UpperCamelCase__)[0] snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1])) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3) def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : List[str] , a : str=None , a : int=None , a : int=None , a : int=None , a : Optional[int]=None , ): if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _lowercase : List[Any] = ( { '''conversational''': 
TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : Optional[int] = True _lowercase : Dict = False _lowercase : Any = False def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = TFPegasusModelTester(self) snake_case__ = ConfigTester(self , config_class=UpperCamelCase__) def __magic_name__ ( self : List[Any]): '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : List[str] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] _lowercase : str = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _lowercase : int = '''google/pegasus-xsum''' @cached_property def __magic_name__ ( self : Dict): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.translate_src_text(**UpperCamelCase__) assert self.expected_text == generated_words def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""") snake_case__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__) return generated_words @slow def __magic_name__ ( self : List[str]): '''simple docstring''' self._assert_generated_batch_equal_expected()
654
0
"""Train a small binary-classification CNN on an image folder dataset with Keras."""
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu"))

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Add the batch axis the model expects: (1, 64, 64, 3).
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
662
"""Project Euler 30: sum of all numbers that equal the sum of the fifth powers of their digits."""

# Precomputed fifth power of each decimal digit, keyed by its character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of *number*."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum every number in [1000, 1000000) equal to its digit fifth-power sum.

    The lower bound excludes trivial one-digit fixed points; 6 * 9**5 < 10**6
    bounds the search from above.
    """
    return sum(number for number in range(1000, 1000000) if number == digits_fifth_powers_sum(number))


if __name__ == "__main__":
    print(solution())
662
1
def _snake_case (__lowercase = 10 , __lowercase = 22): UpperCamelCase_ = range(1 , __lowercase) UpperCamelCase_ = range(1 , __lowercase) return sum( 1 for power in powers for base in bases if len(str(base**power)) == power) if __name__ == "__main__": print(f'{solution(1_0, 2_2) = }')
23
# Re-exports for the data processors package.
# NOTE(review): the original imported `SquadVaProcessor` twice; the upstream
# transformers exports are SquadV1Processor and SquadV2Processor.
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
699
0
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union

import torch

from ..utils import BaseOutput


# Filename under which scheduler configs are (de)serialized.
# NOTE(review): the class body below references this name, but the original
# bound the string to a mangled name, raising NameError at import time.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    """Closed set of mutually compatible scheduler implementations.

    The original collapsed all 14 members (and the Enum base) into one mangled
    name; member names are restored from the upstream diffusers enum.
    """

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    # NOTE(review): the field annotation was destroyed in the original
    # (`_snake_case = 42`); upstream this is the denoised previous sample —
    # confirm against the repository's scheduler outputs.
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """Base mixin for schedulers: config (de)serialization + compatibility discovery."""

    config_name = SCHEDULER_CONFIG_NAME
    # Names of scheduler classes this one can be swapped with; read by
    # _get_compatibles, so the attribute must exist even when empty.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Load a scheduler config from a local dir or the Hub and instantiate."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Serialize the scheduler config to `save_directory` (optionally pushing to the Hub)."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this scheduler can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve class names against the top-level package; names that are
        # not importable (optional deps) are silently skipped.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
703
# Configuration for the documentation notebook builder.
# NOTE(review): all three constants were bound to the same mangled name, and
# the cell list referenced an undefined `INSTALL_CONTENT`; names restored.

# Leading cell injected into every generated notebook.
INSTALL_CONTENT = (
    "\n# Transformers installation\n! pip install transformers datasets\n"
    "# To install from source instead of the last release, comment the command above "
    "and uncomment the following one.\n"
    "# ! pip install git+https://github.com/huggingface/transformers.git\n"
)

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder substitutions applied before running black on doc snippets.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
684
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) A_ : Union[str, Any] = logging.get_logger(__name__) A_ : str = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 
'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) A_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __a ( SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: __UpperCAmelCase = model_type_to_module_name(SCREAMING_SNAKE_CASE ) __UpperCAmelCase = importlib.import_module(f'''.{module_name}''' , '''transformers.models''' ) try: return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(SCREAMING_SNAKE_CASE , '''__name__''' , SCREAMING_SNAKE_CASE ) == class_name: 
return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __UpperCAmelCase = importlib.import_module('''transformers''' ) if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return None def __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , **SCREAMING_SNAKE_CASE , ) -> List[Any]: '''simple docstring''' __UpperCAmelCase = get_file_from_repo( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE , resume_download=SCREAMING_SNAKE_CASE , proxies=SCREAMING_SNAKE_CASE , use_auth_token=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , local_files_only=SCREAMING_SNAKE_CASE , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as reader: return json.load(SCREAMING_SNAKE_CASE ) class A_ : '''simple docstring''' def __init__(self ) -> Tuple: raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(lowercase__ ) def lowerCAmelCase_ (cls , lowercase__ , **lowercase__ ) -> List[Any]: __UpperCAmelCase = kwargs.pop('''config''' , lowercase__ ) __UpperCAmelCase = kwargs.pop('''trust_remote_code''' , lowercase__ ) __UpperCAmelCase = True __UpperCAmelCase , __UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(lowercase__ , **lowercase__ ) __UpperCAmelCase = 
config_dict.get('''image_processor_type''' , lowercase__ ) __UpperCAmelCase = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): __UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: __UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , lowercase__ ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) __UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): __UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor'''] __UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. 
if image_processor_class is None and image_processor_auto_map is None: if not isinstance(lowercase__ , lowercase__ ): __UpperCAmelCase = AutoConfig.from_pretrained(lowercase__ , **lowercase__ ) # It could be in `config.image_processor_type`` __UpperCAmelCase = getattr(lowercase__ , '''image_processor_type''' , lowercase__ ) if hasattr(lowercase__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: __UpperCAmelCase = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: __UpperCAmelCase = image_processor_class_from_name(lowercase__ ) __UpperCAmelCase = image_processor_auto_map is not None __UpperCAmelCase = image_processor_class is not None or type(lowercase__ ) in IMAGE_PROCESSOR_MAPPING __UpperCAmelCase = resolve_trust_remote_code( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if has_remote_code and trust_remote_code: __UpperCAmelCase = get_class_from_dynamic_module( lowercase__ , lowercase__ , **lowercase__ ) __UpperCAmelCase = kwargs.pop('''code_revision''' , lowercase__ ) if os.path.isdir(lowercase__ ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(lowercase__ , **lowercase__ ) elif image_processor_class is not None: return image_processor_class.from_dict(lowercase__ , **lowercase__ ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(lowercase__ ) in IMAGE_PROCESSOR_MAPPING: __UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(lowercase__ )] return image_processor_class.from_dict(lowercase__ , **lowercase__ ) raise ValueError( F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCAmelCase_ (lowercase__ , lowercase__ ) -> str: IMAGE_PROCESSOR_MAPPING.register(lowercase__ , lowercase__ )
303
# Re-export the Versatile Diffusion pipelines, guarding against missing
# optional dependencies (PyTorch and transformers >= 4.25.0).
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # Probe the optional dependencies; raising routes us to the dummy objects
    # below, which defer the error until a pipeline is actually used.
    if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholder classes that raise an informative ImportError on use.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # All dependencies present: export the real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
303
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowercase = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
# (Renamed: the original bound this to a second module-level `__lowercase`,
# silently shadowing the logger above.)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class lowerCamelCase_ ( PretrainedConfig ):
    '''Configuration for the Donut Swin Transformer encoder.

    Stores the hyper-parameters needed to build the Swin backbone used by
    Donut (image/patch size, per-stage depths and heads, dropout rates, ...).
    Instantiating with no arguments yields the configuration of the
    ``naver-clova-ix/donut-base`` architecture.

    Fixes vs. the previous revision: the base class was an undefined name
    (``PretrainedConfig`` is what the import provides), every ``__init__``
    parameter was named ``__lowercase`` (a SyntaxError), the attribute
    assignments were missing ``self.``, and ``model_type``/``attribute_map``
    were both bound to the same attribute ``a__`` so the first was lost.
    '''

    a__ : int = """donut-swin"""
    model_type = """donut-swin"""
    # Maps common HF config attribute names onto Swin's per-stage names.
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=224,  # input resolution in pixels
        patch_size=4,  # patch-embedding patch size
        num_channels=3,  # input image channels
        embed_dim=96,  # embedding dimension of the first stage
        depths=[2, 2, 6, 2],  # transformer blocks per stage (kept as upstream)
        num_heads=[3, 6, 12, 24],  # attention heads per stage
        window_size=7,  # local-attention window size
        mlp_ratio=4.0,  # MLP hidden dim as a multiple of embed dim
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,  # stochastic-depth rate
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
704
import qiskit


def half_adder(bita: int, bitb: int):
    """Simulate a quantum half adder on the two input bits.

    Encodes the inputs on qubits 0 and 1, computes XOR (sum bit) on qubit 2
    via two CNOTs and AND (carry bit) on qubit 3 via a Toffoli gate, then
    measures both outputs over 1000 shots on the Aer qasm simulator.

    Fixes vs. the previous revision: both parameters shared one name (a
    SyntaxError), the second input bit was never encoded (both guards tested
    the first bit), and the definition name did not match the
    ``half_adder(1, 1)`` call in the __main__ block.

    Args:
        bita: first input bit (0 or 1).
        bitb: second input bit (0 or 1).

    Returns:
        The measurement histogram (qiskit Counts) keyed by "carry sum" bits.
    """
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F'Half Adder Output Qubit Counts: {counts}')
452
0
"""simple docstring""" from __future__ import annotations from typing import TypedDict class lowercase__ ( SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = 42 def a__ ( lowerCAmelCase__ ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("The parameter s type must be str." ) return [s[i:] + s[:i] for i in range(len(lowerCAmelCase__ ) )] def a__ ( lowerCAmelCase__ ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("The parameter s type must be str." ) if not s: raise ValueError("The parameter s must not be empty." ) UpperCAmelCase_ = all_rotations(lowerCAmelCase__ ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation UpperCAmelCase_ = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(lowerCAmelCase__ ), } return response def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ): if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("The parameter bwt_string type must be str." ) if not bwt_string: raise ValueError("The parameter bwt_string must not be empty." ) try: UpperCAmelCase_ = int(lowerCAmelCase__ ) except ValueError: raise TypeError( "The parameter idx_original_string type must be int or passive" " of cast to int." ) if idx_original_string < 0: raise ValueError("The parameter idx_original_string must not be lower than 0." ) if idx_original_string >= len(lowerCAmelCase__ ): raise ValueError( "The parameter idx_original_string must be lower than" " len(bwt_string)." 
) UpperCAmelCase_ = [""] * len(lowerCAmelCase__ ) for _ in range(len(lowerCAmelCase__ ) ): for i in range(len(lowerCAmelCase__ ) ): UpperCAmelCase_ = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": lowerCamelCase = """Provide a string that I will generate its BWT transform: """ lowerCamelCase = input(entry_msg).strip() lowerCamelCase = bwt_transform(s) print( F"Burrows Wheeler transform for string '{s}' results " F"in '{result['bwt_string']}'" ) lowerCamelCase = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' " F"we get original string '{original_string}'" )
82
import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,A : List[str] ,A : str=7 ,A : Optional[Any]=3 ,A : Any=18 ,A : int=30 ,A : int=4_00 ,A : List[str]=True ,A : Union[str, Any]=None ,A : Union[str, Any]=True ,A : Tuple=None ,A : Tuple=True ,A : Union[str, Any]=[0.5, 0.5, 0.5] ,A : str=[0.5, 0.5, 0.5] ,A : List[Any]=False ,): __A = size if size is not None else {"height": 20, "width": 20} __A = crop_size if crop_size is not None else {"height": 18, "width": 18} __A = parent __A = batch_size __A = num_channels __A = image_size __A = min_resolution __A = max_resolution __A = do_resize __A = size __A = do_center_crop __A = crop_size __A = do_normalize __A = image_mean __A = image_std __A = do_reduce_labels def UpperCamelCase_ ( self : List[str] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_reduce_labels": self.do_reduce_labels, } def UpperCAmelCase ( ) -> int: """simple docstring""" __A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __A = Image.open(dataset[0]["file"] ) __A = Image.open(dataset[1]["file"] ) return image, map def UpperCAmelCase ( ) -> Optional[int]: """simple docstring""" __A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) __A = Image.open(ds[0]["file"] ) __A = Image.open(ds[1]["file"] ) __A = Image.open(ds[2]["file"] ) __A = Image.open(ds[3]["file"] ) return 
[imagea, imagea], [mapa, mapa] @require_torch @require_vision class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' snake_case_ = BeitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self : List[Any] ): __A = BeitImageProcessingTester(self ) @property def UpperCamelCase_ ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self : int ): __A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A ,"do_resize" ) ) self.assertTrue(hasattr(A ,"size" ) ) self.assertTrue(hasattr(A ,"do_center_crop" ) ) self.assertTrue(hasattr(A ,"center_crop" ) ) self.assertTrue(hasattr(A ,"do_normalize" ) ) self.assertTrue(hasattr(A ,"image_mean" ) ) self.assertTrue(hasattr(A ,"image_std" ) ) def UpperCamelCase_ ( self : List[str] ): __A = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"height": 20, "width": 20} ) self.assertEqual(image_processor.crop_size ,{"height": 18, "width": 18} ) self.assertEqual(image_processor.do_reduce_labels ,A ) __A = self.image_processing_class.from_dict( self.image_processor_dict ,size=42 ,crop_size=84 ,reduce_labels=A ) self.assertEqual(image_processor.size ,{"height": 42, "width": 42} ) self.assertEqual(image_processor.crop_size ,{"height": 84, "width": 84} ) self.assertEqual(image_processor.do_reduce_labels ,A ) def UpperCamelCase_ ( self : List[Any] ): pass def UpperCamelCase_ ( self : Optional[int] ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A ,Image.Image ) # Test not batched input __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) def UpperCamelCase_ ( self : List[str] ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,numpify=A ) for image in image_inputs: self.assertIsInstance(A ,np.ndarray ) # Test not batched input __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) def UpperCamelCase_ ( self : int ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) # Test not batched input __A = image_processing(image_inputs[0] ,return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) # Test batched __A = image_processing(A ,return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) def UpperCamelCase_ ( self : str ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A ,torchify=A ) __A = [] for image in image_inputs: self.assertIsInstance(A ,torch.Tensor ) maps.append(torch.zeros(image.shape[-2:] ).long() ) # Test not batched input __A = image_processing(image_inputs[0] ,maps[0] ,return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual( encoding["labels"].shape ,( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual(encoding["labels"].dtype ,torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_55 ) # Test batched __A = image_processing(A ,A ,return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual( encoding["labels"].shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual(encoding["labels"].dtype ,torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_55 ) # Test not batched input (PIL images) __A , __A = prepare_semantic_single_inputs() __A = image_processing(A ,A 
,return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual( encoding["labels"].shape ,( 1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual(encoding["labels"].dtype ,torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_55 ) # Test batched input (PIL images) __A , __A = prepare_semantic_batch_inputs() __A = image_processing(A ,A ,return_tensors="pt" ) self.assertEqual( encoding["pixel_values"].shape ,( 2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual( encoding["labels"].shape ,( 2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) ,) self.assertEqual(encoding["labels"].dtype ,torch.long ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_55 ) def UpperCamelCase_ ( self : Dict ): # Initialize image_processing __A = self.image_processing_class(**self.image_processor_dict ) # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150 __A , __A = prepare_semantic_single_inputs() __A = image_processing(A ,A ,return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 1_50 ) __A = True __A = image_processing(A ,A ,return_tensors="pt" ) self.assertTrue(encoding["labels"].min().item() >= 0 ) self.assertTrue(encoding["labels"].max().item() <= 2_55 )
55
0
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


__UpperCAmelCase : List[str] = logging.get_logger(__name__)


class __snake_case ( __lowerCamelCase ):
    """Image processor that resizes images down to a multiple of `size_divisor`
    and rescales pixel values to [0, 1].

    NOTE(review): the base class `__lowerCamelCase` is undefined in this
    module — `BaseImageProcessor` (imported above) would match; confirm.
    """

    # Keys produced by the preprocessing pipeline.
    lowerCAmelCase__ = ["""pixel_values"""]

    def __init__( self : List[str] , A : bool = True , A : int = 32 , A : Optional[Any]=PILImageResampling.BILINEAR , A : bool = True , **A : Optional[int] , ):
        # NOTE(review): every parameter is named `A` — a SyntaxError as
        # written; the assignments below suggest the intended order is
        # (do_resize, size_divisor, resample, do_rescale). The targets also
        # look stripped of `self.` by mechanical renaming — confirm.
        __snake_case: str = do_resize
        __snake_case: int = do_rescale
        __snake_case: Dict = size_divisor
        __snake_case: Tuple = resample
        super().__init__(**A )

    def UpperCAmelCase__ ( self : Dict , A : np.ndarray , A : int , A : List[str] , A : Optional[ChannelDimension] = None , **A : List[str] ):
        # Resize an image so each spatial dimension is a multiple of
        # size_divisor (rounding down).
        __snake_case , __snake_case: List[Any] = get_image_size(A )
        # Rounds the height and width down to the closest multiple of size_divisor
        __snake_case: Optional[Any] = height // size_divisor * size_divisor
        __snake_case: Tuple = width // size_divisor * size_divisor
        __snake_case: Optional[int] = resize(A , (new_h, new_w) , resample=A , data_format=A , **A )
        return image

    def UpperCAmelCase__ ( self : Union[str, Any] , A : np.ndarray , A : float , A : Optional[ChannelDimension] = None , **A : str ):
        # Thin wrapper around the shared `rescale` transform.
        return rescale(image=A , scale=A , data_format=A , **A )

    def UpperCAmelCase__ ( self : Optional[Any] , A : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , A : Optional[bool] = None , A : Optional[int] = None , A : List[Any]=None , A : Optional[bool] = None , A : Optional[Union[TensorType, str]] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : Tuple , ):
        # Full preprocessing pipeline: resolve per-call overrides against the
        # instance defaults, validate the inputs, convert to numpy arrays,
        # optionally resize and rescale, then package the result as a
        # BatchFeature in the requested tensor type.
        __snake_case: Dict = do_resize if do_resize is not None else self.do_resize
        __snake_case: List[str] = do_rescale if do_rescale is not None else self.do_rescale
        __snake_case: str = size_divisor if size_divisor is not None else self.size_divisor
        __snake_case: Tuple = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        __snake_case: Union[str, Any] = make_list_of_images(A )
        if not valid_images(A ):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        __snake_case: Optional[int] = [to_numpy_array(A ) for img in images]
        if do_resize:
            __snake_case: Tuple = [self.resize(A , size_divisor=A , resample=A ) for image in images]
        if do_rescale:
            # 1/255 maps uint8 pixel values into [0, 1].
            __snake_case: Dict = [self.rescale(A , scale=1 / 255 ) for image in images]
        __snake_case: List[str] = [to_channel_dimension_format(A , A ) for image in images]
        __snake_case: Dict = {"""pixel_values""": images}
        return BatchFeature(data=A , tensor_type=A )
155
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits.

    The sequence is indexed F1 = 1, F2 = 1, F3 = 2, ...; e.g. the first term
    with three digits is F12 = 144, so ``solution(3) == 12`` (Project Euler
    problem 25 uses n = 1000).

    Fixes vs. the previous revision: the function was defined under the
    obfuscated name ``A__`` while the __main__ block called ``solution``
    (a NameError), and the manual character-counting loop is replaced by
    ``len(str(...))``.
    """
    fib_prev, fib_curr = 1, 1
    index = 2  # fib_curr currently holds F2
    while True:
        next_term = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, next_term
        index += 1
        if len(str(next_term)) == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
155
1
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __UpperCamelCase : Any = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __UpperCamelCase : Tuple = [file for file in filepaths if file != file.lower()] if upper_files: print(F'''{len(upper_files)} files contain uppercase characters:''') print("""\n""".join(upper_files) + """\n""") __UpperCamelCase : Dict = [file for file in filepaths if """ """ in file] if space_files: print(F'''{len(space_files)} files contain space characters:''') print("""\n""".join(space_files) + """\n""") __UpperCamelCase : Optional[Any] = [file for file in filepaths if """-""" in file] if hyphen_files: print(F'''{len(hyphen_files)} files contain hyphen characters:''') print("""\n""".join(hyphen_files) + """\n""") __UpperCamelCase : List[str] = [file for file in filepaths if os.sep not in file] if nodir_files: print(F'''{len(nodir_files)} files are not in a directory:''') print("""\n""".join(nodir_files) + """\n""") __UpperCamelCase : List[str] = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
80
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=3_0 , __UpperCAmelCase=4_0_0 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=True , __UpperCAmelCase=1 / 2_5_5 , __UpperCAmelCase=True , ): '''simple docstring''' lowerCAmelCase__ :Any = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} lowerCAmelCase__ :List[Any] = parent lowerCAmelCase__ :int = batch_size lowerCAmelCase__ :Union[str, Any] = num_channels lowerCAmelCase__ :Any = min_resolution lowerCAmelCase__ :Dict = max_resolution lowerCAmelCase__ :Dict = do_resize lowerCAmelCase__ :Optional[Any] = size lowerCAmelCase__ :List[str] = do_normalize lowerCAmelCase__ :str = image_mean lowerCAmelCase__ :Tuple = image_std lowerCAmelCase__ :Dict = do_rescale lowerCAmelCase__ :Tuple = rescale_factor lowerCAmelCase__ :Optional[int] = do_pad def snake_case ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' if not batched: lowerCAmelCase__ :str = image_inputs[0] if isinstance(__UpperCAmelCase , Image.Image ): 
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = image.size else: lowerCAmelCase__ , lowerCAmelCase__ :str = image.shape[1], image.shape[2] if w < h: lowerCAmelCase__ :int = int(self.size['shortest_edge'] * h / w ) lowerCAmelCase__ :List[str] = self.size['shortest_edge'] elif w > h: lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge'] lowerCAmelCase__ :Any = int(self.size['shortest_edge'] * w / h ) else: lowerCAmelCase__ :int = self.size['shortest_edge'] lowerCAmelCase__ :Union[str, Any] = self.size['shortest_edge'] else: lowerCAmelCase__ :Optional[Any] = [] for image in image_inputs: lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCAmelCase__ :List[str] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[0] )[0] lowerCAmelCase__ :List[Any] = max(__UpperCAmelCase , key=lambda __UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :Any = DetaImageProcessor if is_vision_available() else None def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :int = DetaImageProcessingTester(self ) @property def snake_case ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'image_std' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_rescale' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'do_pad' ) ) self.assertTrue(hasattr(__UpperCAmelCase , 'size' ) ) def snake_case ( self ): '''simple docstring''' 
lowerCAmelCase__ :Dict = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' pass def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCAmelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , Image.Image ) # Test not batched input lowerCAmelCase__ :Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ , lowerCAmelCase__ :Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , np.ndarray ) # Test not batched input lowerCAmelCase__ :List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ :List[Any] 
= self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ :Tuple = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(__UpperCAmelCase , torch.Tensor ) # Test not batched input lowerCAmelCase__ :Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowerCAmelCase__ :str = image_processing(__UpperCAmelCase , return_tensors='pt' ).pixel_values lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase , batched=__UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with 
open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: lowerCAmelCase__ :Dict = json.loads(f.read() ) lowerCAmelCase__ :int = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them lowerCAmelCase__ :int = DetaImageProcessor() lowerCAmelCase__ :List[Any] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , return_tensors='pt' ) # verify pixel values lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area lowerCAmelCase__ :Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) ) # verify boxes lowerCAmelCase__ :Tuple = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase ) lowerCAmelCase__ :Dict = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ :Any = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) ) # verify is_crowd lowerCAmelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) ) # verify class_labels lowerCAmelCase__ :Optional[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) ) # verify orig_size lowerCAmelCase__ :str = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) ) # verify size lowerCAmelCase__ :Any = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: lowerCAmelCase__ :Dict = json.loads(f.read() ) lowerCAmelCase__ :Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} lowerCAmelCase__ :Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them lowerCAmelCase__ :Dict = DetaImageProcessor(format='coco_panoptic' ) lowerCAmelCase__ :Optional[int] = image_processing(images=__UpperCAmelCase , annotations=__UpperCAmelCase , masks_path=__UpperCAmelCase , return_tensors='pt' ) # verify pixel values lowerCAmelCase__ :str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding['pixel_values'].shape , __UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , __UpperCAmelCase , atol=1E-4 ) ) # verify area lowerCAmelCase__ :Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , __UpperCAmelCase ) ) # verify boxes lowerCAmelCase__ :int = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , __UpperCAmelCase , atol=1E-3 ) ) # verify image_id lowerCAmelCase__ :Optional[int] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , __UpperCAmelCase ) ) # verify is_crowd lowerCAmelCase__ :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) 
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , __UpperCAmelCase ) ) # verify class_labels lowerCAmelCase__ :List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , __UpperCAmelCase ) ) # verify masks lowerCAmelCase__ :Optional[int] = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , __UpperCAmelCase ) # verify orig_size lowerCAmelCase__ :Optional[int] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , __UpperCAmelCase ) ) # verify size lowerCAmelCase__ :Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , __UpperCAmelCase ) )
93
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Builds processor kwargs and random image inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random images as PIL images (default), numpy arrays or torch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests of ChineseCLIPImageProcessor on 3-channel PIL, numpy and torch inputs."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests that 4-channel inputs are converted to 3-channel (RGB) outputs."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB, so the encoded images have 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
716
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> list[int]: SCREAMING_SNAKE_CASE : Optional[Any] = int(__lowerCAmelCase ) # Initialize Result SCREAMING_SNAKE_CASE : int = [] # Traverse through all denomination for denomination in reversed(__lowerCAmelCase ): # Find denominations while int(__lowerCAmelCase ) >= int(__lowerCAmelCase ): total_value -= int(__lowerCAmelCase ) answer.append(__lowerCAmelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : str = """0""" if ( input("""Do you want to enter your denominations ? (yY/n): """).strip().lower() == "y" ): _lowerCamelCase : Tuple = int(input("""Enter the number of denominations you want to add: """).strip()) for i in range(0, n): denominations.append(int(input(f"""Denomination {i}: """).strip())) _lowerCamelCase : Optional[Any] = input("""Enter the change you want to make in Indian Currency: """).strip() else: # All denominations of Indian Currency if user does not enter _lowerCamelCase : List[str] = [1, 2, 5, 10, 20, 50, 100, 500, 2_000] _lowerCamelCase : Dict = input("""Enter the change you want to make: """).strip() if int(value) == 0 or int(value) < 0: print("""The total value cannot be zero or negative.""") else: print(f"""Following is minimal change for {value}: """) _lowerCamelCase : List[str] = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=""" """)
308
0
"""simple docstring""" UpperCamelCase = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ UpperCamelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}] UpperCamelCase = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
104
"""OWL-ViT model configuration classes (text encoder, vision encoder, combined, ONNX)."""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder."""

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    """Combined text + vision configuration for a full OWL-ViT model."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Build an OwlViTConfig from separate text and vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OWL-ViT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        # Combine dummy text inputs (via the tokenizer) and dummy image inputs
        # (via the image processor) into one feed dict.
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
165
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class snake_case ( unittest.TestCase ): """simple docstring""" @slow def snake_case ( self ): """simple docstring""" lowerCamelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) lowerCamelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" ) lowerCamelCase_ = tokenizer("Hello there" , return_tensors="tf" ).input_ids lowerCamelCase_ = tokenizer("Hi I am" , return_tensors="tf" ).input_ids lowerCamelCase_ = model(UpperCamelCase , labels=UpperCamelCase ).loss lowerCamelCase_ = -tf.math.reduce_mean(UpperCamelCase ).numpy() lowerCamelCase_ = -21.228_168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
445
'''simple docstring''' from __future__ import annotations import pandas as pd def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ): lowerCamelCase_ = [0] * no_of_processes lowerCamelCase_ = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(UpperCAmelCase_ ): lowerCamelCase_ = burst_time[i] lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 999999999 lowerCamelCase_ = 0 lowerCamelCase_ = False # Process until all processes are completed while complete != no_of_processes: for j in range(UpperCAmelCase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: lowerCamelCase_ = remaining_time[j] lowerCamelCase_ = j lowerCamelCase_ = True if not check: increment_time += 1 continue remaining_time[short] -= 1 lowerCamelCase_ = remaining_time[short] if minm == 0: lowerCamelCase_ = 999999999 if remaining_time[short] == 0: complete += 1 lowerCamelCase_ = False # Find finish time of current process lowerCamelCase_ = increment_time + 1 # Calculate waiting time lowerCamelCase_ = finish_time - arrival_time[short] lowerCamelCase_ = finar - burst_time[short] if waiting_time[short] < 0: lowerCamelCase_ = 0 # Increment time increment_time += 1 return waiting_time def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : list[int] ): lowerCamelCase_ = [0] * no_of_processes for i in range(UpperCAmelCase_ ): lowerCamelCase_ = burst_time[i] + waiting_time[i] return turn_around_time def __snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int ): lowerCamelCase_ = 0 lowerCamelCase_ = 0 for i in range(UpperCAmelCase_ ): lowerCamelCase_ = total_waiting_time + waiting_time[i] lowerCamelCase_ = total_turn_around_time + turn_around_time[i] print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' ) print("Average turn around time =" , total_turn_around_time / no_of_processes ) 
if __name__ == "__main__": print("""Enter how many process you want to analyze""") a_ : Dict = int(input()) a_ : Any = [0] * no_of_processes a_ : Optional[int] = [0] * no_of_processes a_ : Tuple = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("""Enter the arrival time and burst time for process:--""" + str(i + 1)) a_ , a_ : str = map(int, input().split()) a_ : List[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes) a_ : int = burst_time a_ : Union[str, Any] = no_of_processes a_ : Optional[int] = waiting_time a_ : Any = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) a_ : Optional[int] = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ """Process""", """BurstTime""", """ArrivalTime""", """WaitingTime""", """TurnAroundTime""", ], ) # Printing the dataFrame pd.set_option("""display.max_rows""", fcfs.shape[0] + 1) print(fcfs)
445
1
"""Tests for the repo utility `get_test_info` (model <-> test <-> tester mappings)."""
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
152
"""Reusable harness that model test classes drive to validate their config class."""
import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        """Every config must expose the common properties as getters and setters."""
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        """Round-trip: to_json_string must reproduce the constructor kwargs."""
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        """Round-trip through to_json_file / from_json_file."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        """Round-trip through save_pretrained / from_pretrained."""
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        """Same round-trip, but saved inside a subfolder of the checkpoint dir."""
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_directory = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_directory)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        """num_labels must drive the size of id2label / label2id, including updates."""
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        # Composite configs require sub-configs, so a bare init is not expected to work.
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        """Common kwargs passed to the constructor must land on the config unchanged."""
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
152
1
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Maximum sum of any `k` consecutive elements of `array` (sliding window).

    Raises:
        ValueError: if `k` is negative or larger than the array.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Seed the window with the first k elements, then slide it one step at a
    # time: drop the leftmost element, add the next one on the right.
    window_sum = sum(array[:k])
    max_sum = window_sum
    for i in range(len(array) - k):
        window_sum = window_sum - array[i] + array[i + k]
        max_sum = max(max_sum, window_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
181
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query-token span starts/ends against support examples."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        """Encode tokenizer outputs and return the last hidden state."""
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        """Sum token embeddings along the token axis (keeping the dim)."""
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        """Temperature-scaled softmax over cosine similarities."""
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Score each query against its support set.

        Args:
            W_query: tokenizer output for the query batch.
            W_supports: tokenizer output for the supports, augmented with
                "sizes", "start_token_id" and "end_token_id" entries that are
                stripped before encoding.

        Returns:
            (p_starts, p_ends): per-token start/end probability stacks.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # These keys are metadata, not model inputs; remove before encoding.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            # NOTE(review): the slice start mirrors the original code
            # (previous size, not a cumulative offset) — confirm upstream.
            s = 0 if i == 0 else support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
181
1
class _A : """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : list ) -> None: __UpperCAmelCase =set_counts __UpperCAmelCase =max(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =len(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =[1] * num_sets __UpperCAmelCase =list(range(__SCREAMING_SNAKE_CASE ) ) def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> bool: __UpperCAmelCase =self.get_parent(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =self.get_parent(__SCREAMING_SNAKE_CASE ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] __UpperCAmelCase =0 __UpperCAmelCase =dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 __UpperCAmelCase =self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] __UpperCAmelCase =0 __UpperCAmelCase =src_parent __UpperCAmelCase =self.set_counts[src_parent] __UpperCAmelCase =max(self.max_set , __SCREAMING_SNAKE_CASE ) return True def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> int: if self.parents[disj_set] == disj_set: return disj_set __UpperCAmelCase =self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
68
"""SentencePiece tokenizer for the GPT-SW3 models."""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model, with GPT-SW3 preprocessing."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts.
        # NOTE(review): the exact set of space-like codepoints was unreadable in
        # the source dump; these are the usual Unicode space variants — confirm
        # against the upstream vocabulary before release.
        # fmt: off
        self.whitespaces = {
            " ", "\u2009", "\u200a", "\u202f", "\u2005", "\u2004",
            "\u2006", "\u2007", "\u2008", "\u2002", "\u200b", "„",
        }
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f'[{"".join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]'
        )

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalize whitespace, apply NFC."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Return the decoded string unchanged (no extra cleanup for GPT-SW3)."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc
                # by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is gone (e.g. loaded from a serialized proto); re-serialize.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Fast encode that skips the slow tokenizer machinery.

        Accepts a string or a list of strings; optionally returns a torch tensor
        when `return_tensors` is True or "pt".
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids straight through SentencePiece."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Format a chat as '<eos><bos>User: ...<bos>Bot: ...<bos>Bot:' and encode it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
68
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

# Disable TF32 matmuls so the numeric comparisons below are reproducible.
# NOTE(review): the mangled original only showed `... = False`; restored as the
# usual diffusers test preamble -- confirm against the upstream test file.
torch.backends.cuda.matmul.allow_tf32 = False


class VQDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for ``VQDiffusionPipeline`` built from tiny dummy components."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        # size of the VQ codebook used by both the VQ-VAE and the scheduler
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        # Mangled original said "TransformeraDModel"; this is Transformer2DModel.
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        # Same seed again: tuple output must match the dataclass output.
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        # NOTE(review): the original used a very loose 2.0 tolerance here (kept as-is)
        # but the tight 1e-2 tolerance for the tuple path below.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published microsoft/vq-diffusion-ithq checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
208
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> str | Literal[False]: snake_case__ = list(__lowerCAmelCase ) snake_case__ = list(__lowerCAmelCase ) snake_case__ = 0 for i in range(len(__lowerCAmelCase ) ): if lista[i] != lista[i]: count += 1 snake_case__ = '''_''' if count > 1: return False else: return "".join(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> list[str]: snake_case__ = [] while True: snake_case__ = ['''$'''] * len(__lowerCAmelCase ) snake_case__ = [] for i in range(len(__lowerCAmelCase ) ): for j in range(i + 1 , len(__lowerCAmelCase ) ): snake_case__ = compare_string(binary[i] , binary[j] ) if k is False: snake_case__ = '''*''' snake_case__ = '''*''' temp.append('''X''' ) for i in range(len(__lowerCAmelCase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__lowerCAmelCase ) == 0: return pi snake_case__ = list(set(__lowerCAmelCase ) ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[str]: snake_case__ = [] for minterm in minterms: snake_case__ = '''''' for _ in range(__lowerCAmelCase ): snake_case__ = str(minterm % 2 ) + string minterm //= 2 temp.append(__lowerCAmelCase ) return temp def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> bool: snake_case__ = list(__lowerCAmelCase ) snake_case__ = list(__lowerCAmelCase ) snake_case__ = 0 for i in range(len(__lowerCAmelCase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[str]: snake_case__ = [] snake_case__ = [0] * len(__lowerCAmelCase ) for i in range(len(chart[0] ) ): snake_case__ = 0 snake_case__ = -1 for j in range(len(__lowerCAmelCase ) ): if chart[j][i] == 1: count += 1 snake_case__ = j if count == 1: snake_case__ = 1 for i in range(len(__lowerCAmelCase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): 
if chart[i][j] == 1: for k in range(len(__lowerCAmelCase ) ): snake_case__ = 0 temp.append(prime_implicants[i] ) while True: snake_case__ = 0 snake_case__ = -1 snake_case__ = 0 for i in range(len(__lowerCAmelCase ) ): snake_case__ = chart[i].count(1 ) if count_n > max_n: snake_case__ = count_n snake_case__ = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__lowerCAmelCase ) ): snake_case__ = 0 def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> list[list[int]]: snake_case__ = [[0 for x in range(len(__lowerCAmelCase ) )] for x in range(len(__lowerCAmelCase ) )] for i in range(len(__lowerCAmelCase ) ): snake_case__ = prime_implicants[i].count('''_''' ) for j in range(len(__lowerCAmelCase ) ): if is_for_table(prime_implicants[i] , binary[j] , __lowerCAmelCase ): snake_case__ = 1 return chart def SCREAMING_SNAKE_CASE ( ) -> None: snake_case__ = int(input('''Enter the no. of variables\n''' ) ) snake_case__ = [ float(__lowerCAmelCase ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] snake_case__ = decimal_to_binary(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ = check(__lowerCAmelCase ) print('''Prime Implicants are:''' ) print(__lowerCAmelCase ) snake_case__ = prime_implicant_chart(__lowerCAmelCase , __lowerCAmelCase ) snake_case__ = selection(__lowerCAmelCase , __lowerCAmelCase ) print('''Essential Prime Implicants are:''' ) print(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
208
1
"""Lazy import structure for the Speech2Text model family.

Names are only imported when actually accessed (via ``_LazyModule``); the
optional sentencepiece / torchaudio / TF / PyTorch branches are skipped
silently when the corresponding dependency is missing.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports must match the string names declared above exactly
    # (the mangled original imported "SpeechaText*" here, which do not exist).
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
187
"""Configuration class for the BiT (Big Transfer) backbone model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    """Stores the configuration of a BiT model.

    Instantiating with defaults yields a configuration similar to
    ``google/bit-50``.  Inherits backbone-selection helpers from
    ``BackboneConfigMixin`` and serialization from ``PretrainedConfig``.
    """

    model_type = "bit"
    # Valid values for ``layer_type`` / ``global_padding`` below.
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                # Normalize to upper case ("same" -> "SAME").
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        # One named stage per depth entry, plus the stem, for backbone selection.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
187
1
import torch


def main() -> None:
    """Print how many CUDA GPUs this process can see (0 when CUDA is unavailable)."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
540
import unittest

from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """Tests for the zero-shot-audio-classification pipeline (CLAP models)."""

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        # Tiny random model: scores are near-uniform across the two labels.
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        # Batched input (list of identical clips): one result list per clip.
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        # Explicit batch_size must not change the results.
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
540
1
"""Lazy import structure for the BigBirdPegasus model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only registered when PyTorch is installed.
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
162
"""Configuration classes for the SegFormer model."""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    """Stores the configuration of a SegFormer model.

    Defaults yield a configuration similar to ``nvidia/segformer-b0``.
    Per-stage hyper-parameters (depths, strides, heads, ...) are given as
    one list entry per encoder block.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            # Deprecated escape hatch; behaviour will default to True.
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
120
0
"""Configuration class for the EnCodec neural audio codec."""
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    """Stores the configuration of an EnCodec model.

    Defaults yield a configuration similar to ``facebook/encodec_24khz``.
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook vectors default to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        """Stride between chunks in samples, derived from the overlap ratio."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        """Number of codec frames produced per second of audio."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        """Number of residual quantizers needed for the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
721
"""Prim's minimum-spanning-tree algorithm over a binary min-heap."""
import sys
from collections import defaultdict


class Heap:
    """Binary min-heap that also tracks each vertex's position in the heap array.

    ``node_position[v]`` is the index of vertex ``v`` inside the external
    ``heap``/``positions`` arrays passed to every method.
    """

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        """Return the current heap index of ``vertex``."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that ``vertex`` now lives at heap index ``pos``."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down until the heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and vertex labels, then fix the position index of both.
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift ``val`` (a decreased key at ``index``) up to its proper place."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                # Shift the parent down one level.
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: val belongs at index 0.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place from an arbitrary array."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the MST edges of a connected, undirected, weighted graph.

    ``adjacency_list`` maps each vertex ``0..n-1`` to ``[neighbor, weight]``
    pairs.  The result is a list of ``(parent, vertex)`` edges in the order
    vertices were added to the tree, starting from vertex 0.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                # Relax: a cheaper connection into the tree was found.
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
377
0
'''simple docstring''' from __future__ import annotations __A : Optional[int] = 'Muhammad Umer Farooq' __A : List[Any] = 'MIT' __A : Optional[int] = '1.0.0' __A : Dict = 'Muhammad Umer Farooq' __A : List[Any] = 'contact@muhammadumerfarooq.me' __A : Union[str, Any] = 'Alpha' import re from html.parser import HTMLParser from urllib import parse import requests class __UpperCamelCase ( _a ): def __init__( self :Dict ,_UpperCamelCase :str ): super().__init__() snake_case_ : List[str] = [] snake_case_ : str = domain def a__ ( self :int ,_UpperCamelCase :str ,_UpperCamelCase :list[tuple[str, str | None]] ): # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: snake_case_ : Tuple = parse.urljoin(self.domain ,_UpperCamelCase ) self.urls.append(_UpperCamelCase ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' return ".".join(get_sub_domain_name(snake_case__ ).split(""".""" )[-2:] ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' return parse.urlparse(snake_case__ ).netloc def UpperCAmelCase ( lowerCamelCase_ :str = "https://github.com" ): '''simple docstring''' snake_case_ : str = get_domain_name(snake_case__ ) # Initialize the parser snake_case_ : Optional[int] = Parser(snake_case__ ) try: # Open URL snake_case_ : Optional[Any] = requests.get(snake_case__ ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through snake_case_ : Any = set() for link in parser.urls: # open URL. # read = requests.get(link) try: snake_case_ : int = requests.get(snake_case__ ) # Get the valid email. snake_case_ : int = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text ) # If not in list then append it. 
for email in emails: valid_emails.add(snake_case__ ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(snake_case__ ) if __name__ == "__main__": __A : Optional[Any] = emails_from_url('https://github.com') print(F'{len(emails)} emails found:') print('\n'.join(sorted(emails)))
334
"""simple docstring""" from __future__ import annotations import math def lowercase (snake_case__ : int ) -> list[int]: '''simple docstring''' if num <= 0: lowerCAmelCase = f'''{num}: Invalid input, please enter a positive integer.''' raise ValueError(snake_case__ ) lowerCAmelCase = [True] * (num + 1) lowerCAmelCase = [] lowerCAmelCase = 2 lowerCAmelCase = int(math.sqrt(snake_case__ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(snake_case__ ) # Set multiples of start be False for i in range(start * start , num + 1 , snake_case__ ): if sieve[i] is True: lowerCAmelCase = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(snake_case__ ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
169
0
"""Configuration classes for the ImageGPT model."""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    """Stores the configuration of an ImageGPT model (a GPT-2-style decoder
    over color-cluster pixel tokens).

    Defaults yield a configuration similar to ``openai/imagegpt-small``.
    """

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the common PretrainedConfig attribute names onto GPT-2 naming.
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # one position per pixel of a 32x32 image
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs by running random images through the
        feature extractor.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
706
"""Infix to postfix/prefix expression conversion using a stack."""


def infix_2_postfix(infix):
    """Convert an infix expression string to postfix, printing each step.

    Operands are single alphanumeric characters; supported operators are
    ^ * / % + - and parentheses.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack.
                # The extra stack[-1] != "(" guard stops popping at an open
                # parenthesis (it has no priority entry; without the guard a
                # parenthesized input raised KeyError).
                while (
                    len(stack) > 0
                    and stack[-1] != "("
                    and priority[x] <= priority[stack[-1]]
                ):
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert an infix expression string to prefix.

    Works by reversing the input (swapping parentheses), converting to
    postfix, and reversing the result.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
112
0
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji JSON file into dictionaries.

    Returns (vocab, raw_vocab, ids_to_tokens, emoji) where each vocab row may
    hold several comma-separated surface forms mapping to the same id.
    """
    with open(emoji_file, 'r', encoding='utf-8') as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as f:
        token = f.readlines()
    # a line that is just "," (or contains no comma) is a single token;
    # otherwise it is a comma-separated list of equivalent surface forms
    token = [[t.rstrip('\n')] if (t == ',' or ',' not in t) else t.rstrip('\n').split(',') for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        # NOTE(review): raw_vocab key reconstructed from upstream transformers;
        # the obfuscated source lost the key expression — confirm against it.
        raw_vocab[','.join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, built on SubWordJapaneseTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`'
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) back to a token (str)."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens to a single string."""
        out_string = ''.join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, each turn followed by EOS,
        truncated on the left to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            # NOTE(review): add_special_tokens value reconstructed as False
            # from upstream; the obfuscated source lost the literal.
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab (one comma-joined row per id) and the emoji JSON."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file']
            )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!'
                    )
                    index = token_index
                writer.write(','.join(token) + '\n')
                index += 1
        with open(emoji_file, 'w', encoding='utf-8') as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Japanese sub-word tokenizer with URL/email/date/price normalization
    and byte-level fallback for out-of-vocabulary characters."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # longest surface form in the vocab bounds the greedy match window
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)')
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*')
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}')
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*'
        )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*'
        )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        # map every box-drawing / block-element character onto <BLOCK>
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs/emails/phones/dates/prices with placeholder tokens and
        collapse runs of <BLOCK>."""
        content = self.content_repatter1.sub('<URL>', content)
        content = self.content_repatter2.sub('<EMAIL>', content)
        content = self.content_repatter3.sub('<TEL>', content)
        content = self.content_repatter4.sub('<DATE>', content)
        content = self.content_repatter5.sub('<DATE>', content)
        content = self.content_repatter6.sub('<PRICE>', content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>')
        return content

    def tokenize(self, text, clean=False):
        """Greedy longest-match tokenization with byte fallback."""
        text = text.replace(' ', '<SP>')
        # NOTE(review): upstream replaces the full-width space here; the two
        # replace calls are indistinguishable in the obfuscated source.
        text = text.replace('\u3000', '<SP>')
        text = text.replace('\r\n', '<BR>')
        text = text.replace('\n', '<BR>')
        text = text.replace('\r', '<BR>')
        text = text.replace('\t', '<TAB>')
        text = text.replace('—', 'ー')
        text = text.replace('−', 'ー')
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters in selected 2-byte UTF-8 symbol ranges
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters encoding to 3 bytes in U+2000..U+2BFF
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # special tokens start with "<", so widen the match window for them
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append('<KIGOU>')
                elif checku2e(wd):
                    result.append('<U2000U2BFF>')
                else:
                    for i in wd.encode('utf-8'):
                        result.append('<|byte%d|>' % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Decode a single id, buffering byte-fallback tokens into UTF-8."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word])
            elif word == "<SP>":
                words.append(' ')
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append('\t')
            elif word == "<BLOCK>":
                words.append('▀')
            elif word == "<KIGOU>":
                words.append('ǀ')
            elif word == "<U2000U2BFF>":
                words.append('‖')
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode('utf-8', errors='replace'))
        text = ''.join(words)
        return text
53
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase = { """tokenizer_file""": { """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""", }, } UpperCAmelCase = { """gpt-neox-20b""": 2_048, } class UpperCAmelCase_ ( _lowercase): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , __UpperCamelCase : int=None , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Tuple="<|endoftext|>" , __UpperCamelCase : int="<|endoftext|>" , __UpperCamelCase : Dict="<|endoftext|>" , __UpperCamelCase : Union[str, Any]=False , **__UpperCamelCase : Union[str, Any] , ) -> Any: super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , **__UpperCamelCase , ) _UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , __UpperCamelCase ) != add_prefix_space: _UpperCamelCase = getattr(__UpperCamelCase , pre_tok_state.pop('''type''' ) ) _UpperCamelCase = add_prefix_space _UpperCamelCase = pre_tok_class(**__UpperCamelCase ) _UpperCamelCase = add_prefix_space def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]: _UpperCamelCase 
= self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCamelCase ( self : List[str] , __UpperCamelCase : "Conversation" ) -> List[int]: _UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] ) if len(__UpperCamelCase ) > self.model_max_length: _UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
420
0
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Per-run mutable state threaded through the stateless scheduler."""

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: previous sample, derivative, new state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (variance-exploding),
    Flax/JAX implementation."""

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 1_0_0,
        s_noise: float = 1.007,
        s_churn: float = 8_0,
        s_min: float = 0.05,
        s_max: float = 5_0,
    ):
        # all configuration is captured by @register_to_config
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Fill in the discrete timesteps and the geometric sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        """Increase noise to sigma_hat when sigma lies in [s_min, s_max];
        returns (sample_hat, sigma_hat)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the Euler step."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
127
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT-MSN model; defaults match sayakpaul/vit-msn-base."""

    model_type = 'vit_msn'

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=2_2_4,
        patch_size=1_6,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # store every constructor argument on the instance so that
        # PretrainedConfig serialization (to_dict / save_pretrained) works
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
127
1
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class __lowerCAmelCase : '''simple docstring''' a__ = None a__ = False a__ = False a__ = False a__ = None a__ = None a__ = False a__ = False a__ = False a__ = True a__ = None a__ = 1 a__ = None a__ = False a__ = None a__ = None def _a ( self ): """simple docstring""" return self.__class__(**{k: copy.deepcopy(a ) for k, v in self.__dict__.items()} )
584
"""Gnome sort — https://en.wikipedia.org/wiki/Gnome_sort"""


def gnome_sort(lst):
    """Sort *lst* in place using gnome sort and return it.

    Fix: the original 'swap' assigned both elements to a throwaway local
    instead of ``lst[i - 1], lst[i]``, so the list was never mutated and the
    loop never terminated on unsorted input.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order neighbours, then step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
263
0
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based check: a string can be rearranged into a palindrome iff
    at most one character (ignoring spaces and case) has an odd count."""
    return sum(c % 2 for c in Counter(input_str.replace(''' ''', '''''').lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Manual frequency-dict version of the same palindrome-rearrangement
    check; kept for the benchmark comparison."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(''' ''', '''''').lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(check_str: str = "") -> None:
    """Time both implementations on *check_str* and print the results."""
    print('''\nFor string = ''', check_str, ''':''')
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''',
        '''\tans =''',
        can_string_be_rearranged_as_palindrome_counter(check_str),
        '''\ttime =''',
        timeit(
            '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''',
            setup='''import __main__ as z''',
        ),
        '''seconds''',
    )
    print(
        '''> can_string_be_rearranged_as_palindrome()''',
        '''\tans =''',
        can_string_be_rearranged_as_palindrome(check_str),
        '''\ttime =''',
        timeit(
            '''z.can_string_be_rearranged_as_palindrome(z.check_str)''',
            setup='''import __main__ as z''',
        ),
        '''seconds''',
    )


if __name__ == "__main__":
    check_str = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
710
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


# Lazy import structure: submodule name -> list of public symbols.
# Fix: the obfuscated source bound this dict (and each optional branch's
# list) to a throwaway name while _LazyModule was handed an undefined
# `_import_structure`, and the resulting module was never installed.
_import_structure = {
    '''configuration_audio_spectrogram_transformer''': [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ASTConfig''',
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes are only importable when torch is present
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        '''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ASTForAudioClassification''',
        '''ASTModel''',
        '''ASTPreTrainedModel''',
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ['''ASTFeatureExtractor''']


if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # replace this module with a lazy proxy so heavy deps load on first use
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
452
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def snake_case ( ) -> Generator[int, None, None]: lowerCamelCase : dict[int, int] = {} lowerCamelCase : str = 2 while True: lowerCamelCase : int = factor_map.pop(UpperCamelCase__ , UpperCamelCase__ ) if factor: lowerCamelCase : List[Any] = factor + prime while x in factor_map: x += factor lowerCamelCase : int = factor else: lowerCamelCase : Optional[int] = prime yield prime prime += 1 def snake_case ( UpperCamelCase__ : float = 1E10 ) -> int: lowerCamelCase : Optional[int] = sieve() lowerCamelCase : List[str] = 1 while True: lowerCamelCase : Tuple = next(UpperCamelCase__ ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCamelCase__ ) n += 2 if __name__ == "__main__": print(solution())
222
"""simple docstring""" import math def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float: if initial_intensity < 0: raise ValueError("""The value of intensity cannot be negative""" ) # handling of negative values of initial intensity if angle < 0 or angle > 360: raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(UpperCamelCase__ ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name='malus_law')
222
1
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> float:
    """Convert molarity (moles / volume) to normality via the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L·atm/mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / nR."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
"""WikiSplit metric: combination of SARI, sacreBLEU and exact-match scores."""
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """\
@inproceedings{xu-etal-2016-optimizing,
  title = {Optimizing Statistical Machine Translation for Text Simplification},
  authors = {Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
  journal = {Transactions of the Association for Computational Linguistics},
  volume = {4},
  year = {2016},
  url = {https://www.aclweb.org/anthology/Q16-1029},
  pages = {401--415},
}
@inproceedings{post-2018-call,
  title = "A Call for Clarity in Reporting {BLEU} Scores",
  author = "Post, Matt",
  booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
  month = oct,
  year = "2018",
  address = "Belgium, Brussels",
  publisher = "Association for Computational Linguistics",
  url = "https://www.aclweb.org/anthology/W18-6319",
  pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics: SARI, EXACT and SACREBLEU.
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates the SARI score (between 0 and 100) given a list of source and
predicted sentences, and a list of lists of reference sentences.
It also computes the BLEU score as well as the exact-match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """Return 1 if gold and prediction match after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Return the exact-match percentage over all (prediction, refs) pairs."""
    scores = [
        any(compute_exact(ref, pred) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute the SARI keep/delete/add sub-scores for one n-gram order.

    `sgrams`/`cgrams` are the source and candidate n-grams; `rgramslist`
    holds one n-gram list per reference; `numref` is the reference count.
    Returns (keepscore, delscore_precision, addscore).
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    # Replicate source/candidate counts by the number of references so they
    # are comparable with the pooled reference counter.
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that
    # match a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that
    # match a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that
    # match a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """Compute the sentence-level SARI score.

    `ssent` is the source sentence, `csent` the candidate (prediction) and
    `rsents` the list of reference sentences; all are whitespace-tokenized
    strings. SARI averages keep/delete/add F-scores over 1- to 4-grams.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Lowercase and tokenize a sentence for SARI computation.

    Normalization is required for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space to split the
    sentence. Even though Wiki-Auto and TURK datasets do not require
    normalization, we do it for consistency.
    Code adapted from the EASSE library written by the authors of ASSET.
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Average SARI over the corpus, scaled to 0-100.

    Raises:
        ValueError: if sources, predictions and references differ in length.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus-level sacreBLEU; all predictions must have the same ref count.

    Raises:
        ValueError: if the number of references varies across predictions.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # sacrebleu expects one list per reference position, not per prediction.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Metric combining SARI, sacreBLEU and exact-match for text simplification."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
664
0
"""simple docstring""" import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class _snake_case ( A__ ): '''simple docstring''' UpperCamelCase__ =(CMStochasticIterativeScheduler,) UpperCamelCase__ =10 def snake_case_ ( self : Tuple , **snake_case : Any ): UpperCAmelCase_ :Tuple = { '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**snake_case ) return config def snake_case_ ( self : int ): UpperCAmelCase_ :List[Any] = 10 UpperCAmelCase_ :Union[str, Any] = self.get_scheduler_config() UpperCAmelCase_ :Any = self.scheduler_classes[0](**snake_case ) scheduler.set_timesteps(snake_case ) UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps[0] UpperCAmelCase_ :int = scheduler.timesteps[1] UpperCAmelCase_ :Dict = self.dummy_sample UpperCAmelCase_ :Dict = 0.1 * sample UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def snake_case_ ( self : Union[str, Any] ): for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=snake_case ) def snake_case_ ( self : Dict ): for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=snake_case ) def snake_case_ ( self : Any ): UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase_ :List[str] = self.get_scheduler_config() UpperCAmelCase_ :Any = scheduler_class(**snake_case ) UpperCAmelCase_ :Tuple = 1 scheduler.set_timesteps(snake_case ) UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps UpperCAmelCase_ :Tuple = torch.manual_seed(0 ) UpperCAmelCase_ :Dict = self.dummy_model() UpperCAmelCase_ :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(snake_case ): # 1. 
scale model input UpperCAmelCase_ :int = scheduler.scale_model_input(snake_case , snake_case ) # 2. predict noise residual UpperCAmelCase_ :Optional[int] = model(snake_case , snake_case ) # 3. predict previous sample x_t-1 UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample UpperCAmelCase_ :Dict = pred_prev_sample UpperCAmelCase_ :Any = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase_ :Tuple = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def snake_case_ ( self : Optional[int] ): UpperCAmelCase_ :List[Any] = self.scheduler_classes[0] UpperCAmelCase_ :Tuple = self.get_scheduler_config() UpperCAmelCase_ :Dict = scheduler_class(**snake_case ) UpperCAmelCase_ :Union[str, Any] = [106, 0] scheduler.set_timesteps(timesteps=snake_case ) UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps UpperCAmelCase_ :Dict = torch.manual_seed(0 ) UpperCAmelCase_ :Optional[Any] = self.dummy_model() UpperCAmelCase_ :int = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input UpperCAmelCase_ :str = scheduler.scale_model_input(snake_case , snake_case ) # 2. predict noise residual UpperCAmelCase_ :Tuple = model(snake_case , snake_case ) # 3. 
predict previous sample x_t-1 UpperCAmelCase_ :Optional[int] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample UpperCAmelCase_ :int = pred_prev_sample UpperCAmelCase_ :List[str] = torch.sum(torch.abs(snake_case ) ) UpperCAmelCase_ :int = torch.mean(torch.abs(snake_case ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def snake_case_ ( self : Optional[int] ): UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase_ :int = self.get_scheduler_config() UpperCAmelCase_ :str = scheduler_class(**snake_case ) UpperCAmelCase_ :int = [39, 30, 12, 15, 0] with self.assertRaises(snake_case , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=snake_case ) def snake_case_ ( self : List[str] ): UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0] UpperCAmelCase_ :List[str] = self.get_scheduler_config() UpperCAmelCase_ :List[Any] = scheduler_class(**snake_case ) UpperCAmelCase_ :Dict = [39, 30, 12, 1, 0] UpperCAmelCase_ :str = len(snake_case ) with self.assertRaises(snake_case , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case ) def snake_case_ ( self : Any ): UpperCAmelCase_ :Dict = self.scheduler_classes[0] UpperCAmelCase_ :int = self.get_scheduler_config() UpperCAmelCase_ :int = scheduler_class(**snake_case ) UpperCAmelCase_ :Union[str, Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( snake_case , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ): scheduler.set_timesteps(timesteps=snake_case )
608
"""simple docstring""" __lowerCamelCase = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], "C": ["A", "F", "G"], "D": ["B"], "E": ["A", "B", "D"], "F": ["C"], "G": ["C"], } def a ( __snake_case : dict, __snake_case : str, __snake_case : Union[str, Any] ): '''simple docstring''' UpperCAmelCase_ :List[Any] = set() # keep track of all the paths to be checked UpperCAmelCase_ :Any = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue UpperCAmelCase_ :Tuple = queue.pop(0 ) # get the last node from the path UpperCAmelCase_ :str = path[-1] if node not in explored: UpperCAmelCase_ :Any = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: UpperCAmelCase_ :Union[str, Any] = list(__snake_case ) new_path.append(__snake_case ) queue.append(__snake_case ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(__snake_case ) # in case there's no path between the 2 nodes return [] def a ( __snake_case : dict, __snake_case : int, __snake_case : str ): '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 UpperCAmelCase_ :Optional[Any] = [start] UpperCAmelCase_ :str = set(__snake_case ) # Keep tab on distances from `start` node. 
UpperCAmelCase_ :Optional[Any] = {start: 0, target: -1} while queue: UpperCAmelCase_ :Optional[Any] = queue.pop(0 ) if node == target: UpperCAmelCase_ :str = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(__snake_case ) queue.append(__snake_case ) UpperCAmelCase_ :Optional[int] = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, "G", "D")) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, "G", "D")) # returns 4
608
1
"""NEZHA model configuration."""
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a NEZHA model.

    Instantiating a configuration with the defaults yields a configuration
    similar to that of the sijunhe/nezha-cn-base architecture.
    """

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        # NEZHA uses functional relative position encodings capped at this distance.
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
265
"""Utilities to read/write the YAML metadata block of a dataset README.md."""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A yaml.SafeLoader that raises TypeError on duplicate mapping keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert to tuples so they can be counted.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml_block, rest); yaml_block is None if absent.

    A YAML front-matter block is delimited by a leading '---' line and a
    closing '---' line.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dataset metadata stored as a dict, (de)serializable to README YAML."""

    # these keys use dashes instead of underscores in the YAML metadata
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of a README.md file.

        Returns an empty DatasetMetadata if the README has no YAML block.
        """
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata into the README at `path`, creating it if needed."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return README text with this metadata's YAML block prepended/replaced."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load metadata from a YAML string; rejects duplicate keys.

        Raises:
            TypeError: if the YAML block contains duplicate keys.
        """
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashed key names where applicable."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")


# NOTE(review): key set mirrors Hub task categories; values (sub-task lists)
# are left empty here — confirm against the canonical task taxonomy.
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}


if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
265
1
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Dict = { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json", "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json", "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json" ), } class _lowerCamelCase( _a ): lowercase_ : List[str] = """longformer""" def __init__( self, lowerCamelCase = 5_12, lowerCamelCase = 2, lowerCamelCase = 1, lowerCamelCase = 0, lowerCamelCase = 2, lowerCamelCase = 3_05_22, lowerCamelCase = 7_68, lowerCamelCase = 12, lowerCamelCase = 12, lowerCamelCase = 30_72, lowerCamelCase = "gelu", lowerCamelCase = 0.1, lowerCamelCase = 0.1, lowerCamelCase = 5_12, lowerCamelCase = 2, lowerCamelCase = 0.0_2, lowerCamelCase = 1E-12, lowerCamelCase = False, **lowerCamelCase, ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=lowerCamelCase, **lowerCamelCase) _lowercase : List[Any] = attention_window _lowercase : Any = sep_token_id _lowercase : Any = bos_token_id _lowercase : int = eos_token_id _lowercase : str = vocab_size _lowercase : Optional[Any] = hidden_size _lowercase : Union[str, 
Any] = num_hidden_layers _lowercase : Any = num_attention_heads _lowercase : Dict = hidden_act _lowercase : int = intermediate_size _lowercase : str = hidden_dropout_prob _lowercase : Optional[Any] = attention_probs_dropout_prob _lowercase : str = max_position_embeddings _lowercase : Optional[int] = type_vocab_size _lowercase : Optional[int] = initializer_range _lowercase : Any = layer_norm_eps _lowercase : Tuple = onnx_export class _lowerCamelCase( _a ): def __init__( self, lowerCamelCase, lowerCamelCase = "default", lowerCamelCase = None) -> Dict: """simple docstring""" super().__init__(lowerCamelCase, lowerCamelCase, lowerCamelCase) _lowercase : Any = True @property def UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": _lowercase : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowercase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ]) @property def UpperCamelCase ( self) -> Mapping[str, Mapping[int, str]]: """simple docstring""" _lowercase : Union[str, Any] = super().outputs if self.task == "default": _lowercase : int = {0: 'batch'} return outputs @property def UpperCamelCase ( self) -> float: """simple docstring""" return 1E-4 @property def UpperCamelCase ( self) -> int: """simple docstring""" return max(super().default_onnx_opset, 14) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = -1, lowerCamelCase = -1, lowerCamelCase = False, lowerCamelCase = None, ) -> Mapping[str, Any]: """simple docstring""" _lowercase : Optional[int] = super().generate_dummy_inputs( preprocessor=lowerCamelCase, batch_size=lowerCamelCase, seq_length=lowerCamelCase, is_pair=lowerCamelCase, framework=lowerCamelCase) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes 
the export fail randomly _lowercase : Dict = torch.zeros_like(inputs['input_ids']) # make every second token global _lowercase : List[str] = 1 return inputs
89
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json", } class _lowerCamelCase( _a ): lowercase_ : Any = """deta""" lowercase_ : Union[str, Any] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self, lowerCamelCase=None, lowerCamelCase=9_00, lowerCamelCase=20_48, lowerCamelCase=6, lowerCamelCase=20_48, lowerCamelCase=8, lowerCamelCase=6, lowerCamelCase=10_24, lowerCamelCase=8, lowerCamelCase=0.0, lowerCamelCase=True, lowerCamelCase="relu", lowerCamelCase=2_56, lowerCamelCase=0.1, lowerCamelCase=0.0, lowerCamelCase=0.0, lowerCamelCase=0.0_2, lowerCamelCase=1.0, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase="sine", lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=4, lowerCamelCase=True, lowerCamelCase=3_00, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=1, lowerCamelCase=5, lowerCamelCase=2, lowerCamelCase=0.1, lowerCamelCase=0.2_5, **lowerCamelCase, ) -> Any: """simple docstring""" if backbone_config is None: logger.info('`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.') _lowercase : List[Any] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4']) else: if isinstance(lowerCamelCase, lowerCamelCase): _lowercase : Dict = backbone_config.pop('model_type') _lowercase : int = CONFIG_MAPPING[backbone_model_type] _lowercase : Union[str, Any] = config_class.from_dict(lowerCamelCase) _lowercase : Union[str, Any] = backbone_config _lowercase : Any = num_queries _lowercase : Union[str, Any] = max_position_embeddings _lowercase : Union[str, Any] = d_model _lowercase : Optional[int] = encoder_ffn_dim _lowercase : Optional[int] = encoder_layers _lowercase : Optional[Any] = encoder_attention_heads _lowercase : Optional[Any] = decoder_ffn_dim _lowercase : Dict = decoder_layers _lowercase : Tuple = decoder_attention_heads _lowercase : Union[str, Any] = dropout _lowercase : Optional[Any] = attention_dropout _lowercase : int = activation_dropout _lowercase : Tuple = activation_function _lowercase : List[Any] = init_std _lowercase : Union[str, Any] = init_xavier_std _lowercase : int = encoder_layerdrop _lowercase : Optional[int] = auxiliary_loss _lowercase : Dict = position_embedding_type # deformable attributes _lowercase : Any = num_feature_levels _lowercase : str = encoder_n_points _lowercase : Any = decoder_n_points _lowercase : List[str] = two_stage _lowercase : Dict = two_stage_num_proposals _lowercase : Any = with_box_refine _lowercase : List[Any] = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.') # Hungarian matcher _lowercase : List[Any] = class_cost _lowercase : Optional[int] = bbox_cost _lowercase : str = giou_cost # Loss coefficients _lowercase : Optional[int] = mask_loss_coefficient _lowercase : int = dice_loss_coefficient _lowercase : List[Any] = bbox_loss_coefficient _lowercase : Optional[Any] = giou_loss_coefficient _lowercase : str = eos_coefficient _lowercase : int 
= focal_alpha super().__init__(is_encoder_decoder=lowerCamelCase, **lowerCamelCase) @property def UpperCamelCase ( self) -> int: """simple docstring""" return self.encoder_attention_heads @property def UpperCamelCase ( self) -> int: """simple docstring""" return self.d_model def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : int = copy.deepcopy(self.__dict__) _lowercase : Optional[int] = self.backbone_config.to_dict() _lowercase : Optional[Any] = self.__class__.model_type return output
89
1
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin UpperCAmelCase : List[str] = random.Random() if is_torch_available(): import torch def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None )-> Dict: """simple docstring""" if rng is None: snake_case_ = global_rng snake_case_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=4_00 , _UpperCAmelCase=20_00 , _UpperCAmelCase=1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1_60_00 , _UpperCAmelCase=True , _UpperCAmelCase=True , ): snake_case_ = parent snake_case_ = batch_size snake_case_ = min_seq_length snake_case_ = max_seq_length snake_case_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case_ = feature_size snake_case_ = padding_value snake_case_ = sampling_rate snake_case_ = return_attention_mask snake_case_ = do_normalize def UpperCamelCase__ ( self ): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ): def _flatten(_UpperCAmelCase ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: snake_case_ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size snake_case_ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in 
range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case_ = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ): '''simple docstring''' __snake_case = ASTFeatureExtractor def UpperCamelCase__ ( self ): snake_case_ = ASTFeatureExtractionTester(self ) def UpperCamelCase__ ( self ): # Tests that all call wrap to encode_plus and batch_encode_plus snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case_ = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test not batched input snake_case_ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values snake_case_ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test batched snake_case_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values snake_case_ = feat_extract(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
snake_case_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] snake_case_ = np.asarray(_UpperCAmelCase ) snake_case_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values snake_case_ = feat_extract(_UpperCAmelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) @require_torch def UpperCamelCase__ ( self ): import torch snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ = np.random.rand(1_00 ).astype(np.floataa ) snake_case_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) snake_case_ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def UpperCamelCase__ ( self , _UpperCAmelCase ): from datasets import load_dataset snake_case_ = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech snake_case_ = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def UpperCamelCase__ ( self ): # fmt: off snake_case_ = torch.tensor( [-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776, -1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133, -1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936, -0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] ) # fmt: on snake_case_ = self._load_datasamples(1 ) snake_case_ = ASTFeatureExtractor() snake_case_ = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).input_values 
self.assertEquals(input_values.shape , (1, 10_24, 1_28) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCAmelCase , atol=1E-4 ) )
702
from collections import namedtuple

# Each unit stores two factors relative to the base unit (cubic metre):
#   from_ : multiply a value expressed IN this unit to get cubic metres
#   to    : multiply a value in cubic metres to express it IN this unit
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00_454, 264.172),
    "cubicyard": from_to(0.76_455, 1.30_795),
    "cubicfoot": from_to(0.028, 35.3_147),
    "cup": from_to(0.000_236_588, 4_226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` from the volume unit ``from_type`` to ``to_type``.

    Supported units are the keys of ``METRIC_CONVERSION``.

    >>> volume_conversion(4, "cubicmeter", "litre")
    4000

    :param value: the quantity to convert, expressed in ``from_type`` units
    :param from_type: source unit name (e.g. ``"litre"``)
    :param to_type: target unit name (e.g. ``"gallon"``)
    :return: the quantity expressed in ``to_type`` units
    :raises ValueError: if either unit name is not a supported unit
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            # Bug fix: previously this joined the characters of the input
            # string itself instead of listing the supported unit names.
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Normalise to cubic metres first, then scale into the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


# Backward-compatible alias for the previous (obfuscated) public name.
__lowerCAmelCase = volume_conversion

if __name__ == "__main__":
    import doctest

    doctest.testmod()
531
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Import structure consumed by _LazyModule: submodule name -> public symbols.
# NOTE(review): obfuscation collapsed several distinct module-level names onto
# `_UpperCamelCase`, so the assignments below overwrite one another, and the
# final _LazyModule(...) call references `_import_structure`, which is never
# defined in this chunk — presumably the original code used a single
# `_import_structure` dict and assigned the proxy into `sys.modules[__name__]`.
# Flagged here, not changed (doc-only pass).
_UpperCamelCase = {
    'configuration_bridgetower': [
        'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BridgeTowerConfig',
        'BridgeTowerTextConfig',
        'BridgeTowerVisionConfig',
    ],
    'processing_bridgetower': ['BridgeTowerProcessor'],
}

# The image processor needs the optional vision extras; register only if present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = ['BridgeTowerImageProcessor']

# The modeling classes need torch; register only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _UpperCamelCase = [
        'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BridgeTowerForContrastiveLearning',
        'BridgeTowerForImageAndTextRetrieval',
        'BridgeTowerForMaskedLM',
        'BridgeTowerModel',
        'BridgeTowerPreTrainedModel',
    ]

# Under static type checking, import everything eagerly so analysers see the
# real symbols; at runtime, fall through to the lazy proxy in the else branch.
if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
243
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def snake_case__ ( _snake_case : List[Any]=32 , _snake_case : Tuple=10 , _snake_case : str=1_00 , _snake_case : Optional[int]=10_26 , _snake_case : Any=True , _snake_case : str="data/tokenized_stories_train_wikitext103.jbl" , _snake_case : Any="igf_context_pairs.jbl" , ): """simple docstring""" set_seed(3 ) # generate train_data and objective_set UpperCamelCase__ , UpperCamelCase__ = generate_datasets( _snake_case , _snake_case , number=_snake_case , min_len=10_26 , trim=_snake_case ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? 
UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # load pretrained model UpperCamelCase__ = load_gpta("gpt2" ).to(_snake_case ) print("computing perplexity on objective set" ) UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ).item() print("perplexity on objective set:" , _snake_case ) # collect igf pairs and save to file demo.jbl collect_objective_set(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def snake_case__ ( _snake_case : Any , _snake_case : str=15 , _snake_case : str=1_28 , _snake_case : int=1_00 , _snake_case : Tuple="igf_model.pt" , ): """simple docstring""" set_seed(42 ) # Load pre-trained model UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" ) # Initialize secondary learner to use embedding weights of model UpperCamelCase__ = SecondaryLearner(_snake_case ) # Train secondary learner UpperCamelCase__ = train_secondary_learner( _snake_case , _snake_case , max_epochs=_snake_case , batch_size=_snake_case , eval_freq=1_00 , igf_model_path=_snake_case , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def snake_case__ ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : List[str]=32 , _snake_case : Tuple=10_00 , _snake_case : List[Any]=16 , _snake_case : str=1.0 , _snake_case : List[str]=recopy_gpta , _snake_case : Optional[int]=None , _snake_case : Optional[int]=10 , _snake_case : Optional[int]="gpt2_finetuned.pt" , ): """simple docstring""" UpperCamelCase__ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) UpperCamelCase__ = RandomSampler(_snake_case ) UpperCamelCase__ = DataLoader(_snake_case , sampler=_snake_case ) UpperCamelCase__ = max_steps // (len(_snake_case )) + 1 UpperCamelCase__ = 0 
UpperCamelCase__ = torch.zeros((1, context_len) , dtype=torch.long , device=_snake_case ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = recopy_model(_snake_case , _snake_case , _snake_case ) model.train() if secondary_learner is not None: secondary_learner.to(_snake_case ) secondary_learner.eval() UpperCamelCase__ = [] UpperCamelCase__ = 0 UpperCamelCase__ = [] UpperCamelCase__ = [] # Compute the performance of the transformer model at the beginning UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) for epoch in range(int(_snake_case ) ): for step, example in enumerate(_snake_case ): torch.cuda.empty_cache() UpperCamelCase__ = random.randint(0 , example.size(2 ) - context_len - 1 ) UpperCamelCase__ = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() UpperCamelCase__ = model(_snake_case , labels=_snake_case ) UpperCamelCase__ = True if secondary_learner is not None: UpperCamelCase__ = secondary_learner.forward( torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_snake_case ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: UpperCamelCase__ = -1 if predicted_q < threshold: UpperCamelCase__ = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) UpperCamelCase__ = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() UpperCamelCase__ = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: UpperCamelCase__ = compute_perplexity(_snake_case , _snake_case , _snake_case ) test_perps.append(_snake_case ) print("Test perplexity, step" , _snake_case , ":" , _snake_case ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , _snake_case ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def snake_case__ ( ): """simple docstring""" UpperCamelCase__ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" ) # Required parameters parser.add_argument( "--data_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The input data dir. Should contain data files for WikiText." , ) parser.add_argument( "--model_name_or_path" , default=_snake_case , type=_snake_case , required=_snake_case , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--data_file" , type=_snake_case , default=_snake_case , help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ) , ) parser.add_argument( "--igf_data_file" , type=_snake_case , default=_snake_case , help="A jbl file containing the context and information gain pairs to train secondary learner." , ) parser.add_argument( "--output_dir" , default=_snake_case , type=_snake_case , required=_snake_case , help="The output directory where the final fine-tuned model is stored." 
, ) parser.add_argument( "--tokenizer_name" , default=_snake_case , type=_snake_case , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument("--seed" , type=_snake_case , default=_snake_case , help="A seed for reproducible training." ) parser.add_argument( "--context_len" , default=32 , type=_snake_case , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--size_objective_set" , default=1_00 , type=_snake_case , help="number of articles that are long enough to be used as our objective set" , ) parser.add_argument( "--eval_freq" , default=1_00 , type=_snake_case , help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps" , default=10_00 , type=_snake_case , help="To calculate training epochs" ) parser.add_argument( "--secondary_learner_batch_size" , default=1_28 , type=_snake_case , help="batch size of training data for secondary learner" , ) parser.add_argument( "--batch_size" , default=16 , type=_snake_case , help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval" , default=10 , type=_snake_case , help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ) , ) parser.add_argument( "--number" , default=1_00 , type=_snake_case , help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len" , default=10_26 , type=_snake_case , help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs" , default=15 , type=_snake_case , help="number of epochs to train secondary learner" ) parser.add_argument("--trim" , default=_snake_case , type=_snake_case , help="truncate the example if it exceeds context length" ) parser.add_argument( 
"--threshold" , default=1.0 , type=_snake_case , help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ) , ) parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_snake_case , help="finetuned_model_name" ) parser.add_argument( "--recopy_model" , default=_snake_case , type=_snake_case , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_snake_case , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ) # Load train data for secondary learner UpperCamelCase__ = joblib.load("data/IGF_values.jbl" ) # Train secondary learner UpperCamelCase__ = training_secondary_learner( _snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , ) # load pretrained gpt2 model UpperCamelCase__ = GPTaLMHeadModel.from_pretrained("gpt2" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model UpperCamelCase__ , UpperCamelCase__ = generate_datasets( context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=_snake_case ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _snake_case , _snake_case , _snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_snake_case , secondary_learner=_snake_case , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ) if __name__ == "__main__": main()
516
0
'''simple docstring''' from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline _lowerCAmelCase : Any = logging.get_logger(__name__) class snake_case ( __lowerCamelCase ): """simple docstring""" def lowercase__ ( self , lowerCamelCase ) -> Union[str, Any]: """simple docstring""" if isinstance(lowerCamelCase , lowerCamelCase ): snake_case__ : Union[str, Any] = [label.strip() for label in labels.split(''',''' ) if label.strip()] return labels def __call__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]: """simple docstring""" if len(lowerCamelCase ) == 0 or len(lowerCamelCase ) == 0: raise ValueError('''You must include at least one label and at least one sequence.''' ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( '''The provided hypothesis_template "{}" was not able to be formatted with the target labels. ''' '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.''' ).format(lowerCamelCase ) ) if isinstance(lowerCamelCase , lowerCamelCase ): snake_case__ : Union[str, Any] = [sequences] snake_case__ : Union[str, Any] = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(__lowerCamelCase ) class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self , lowerCamelCase=ZeroShotClassificationArgumentHandler() , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]: """simple docstring""" snake_case__ : Optional[int] = args_parser super().__init__(*lowerCamelCase , **lowerCamelCase ) if self.entailment_id == -1: logger.warning( '''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to ''' '''-1. 
Define a descriptive label2id mapping in the model config to ensure correct outputs.''' ) @property def lowercase__ ( self ) -> Optional[Any]: """simple docstring""" for label, ind in self.model.config.labelaid.items(): if label.lower().startswith('''entail''' ): return ind return -1 def lowercase__ ( self , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=TruncationStrategy.ONLY_FIRST , **lowerCamelCase ) -> Dict: """simple docstring""" snake_case__ : List[str] = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( '''Tokenizer was not supporting padding necessary for zero-shot, attempting to use ''' ''' `pad_token=eos_token`''' ) snake_case__ : List[Any] = self.tokenizer.eos_token try: snake_case__ : List[Any] = self.tokenizer( lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , ) except Exception as e: if "too short" in str(lowerCamelCase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. snake_case__ : Optional[Any] = self.tokenizer( lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def lowercase__ ( self , **lowerCamelCase ) -> List[Any]: """simple docstring""" if kwargs.get('''multi_class''' , lowerCamelCase ) is not None: snake_case__ : Tuple = kwargs['''multi_class'''] logger.warning( '''The `multi_class` argument has been deprecated and renamed to `multi_label`. 
''' '''`multi_class` will be removed in a future version of Transformers.''' ) snake_case__ : Union[str, Any] = {} if "candidate_labels" in kwargs: snake_case__ : List[Any] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] ) if "hypothesis_template" in kwargs: snake_case__ : Dict = kwargs['''hypothesis_template'''] snake_case__ : Tuple = {} if "multi_label" in kwargs: snake_case__ : str = kwargs['''multi_label'''] return preprocess_params, {}, postprocess_params def __call__( self , lowerCamelCase , *lowerCamelCase , **lowerCamelCase , ) -> Optional[Any]: """simple docstring""" if len(lowerCamelCase ) == 0: pass elif len(lowerCamelCase ) == 1 and "candidate_labels" not in kwargs: snake_case__ : str = args[0] else: raise ValueError(f'''Unable to understand extra arguments {args}''' ) return super().__call__(lowerCamelCase , **lowerCamelCase ) def lowercase__ ( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase="This example is {}." ) -> Any: """simple docstring""" snake_case__ ,snake_case__ : Dict = self._args_parser(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase , lowerCamelCase ) ): snake_case__ : List[str] = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(lowerCamelCase ) - 1, **model_input, } def lowercase__ ( self , lowerCamelCase ) -> str: """simple docstring""" snake_case__ : int = inputs['''candidate_label'''] snake_case__ : List[Any] = inputs['''sequence'''] snake_case__ : Optional[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names} snake_case__ : int = self.model(**lowerCamelCase ) snake_case__ : Union[str, Any] = { '''candidate_label''': candidate_label, '''sequence''': sequence, '''is_last''': inputs['''is_last'''], **outputs, } return model_outputs def lowercase__ ( self , lowerCamelCase , lowerCamelCase=False ) -> Union[str, Any]: """simple docstring""" 
snake_case__ : Any = [outputs['''candidate_label'''] for outputs in model_outputs] snake_case__ : List[str] = [outputs['''sequence'''] for outputs in model_outputs] snake_case__ : Any = np.concatenate([output['''logits'''].numpy() for output in model_outputs] ) snake_case__ : List[Any] = logits.shape[0] snake_case__ : str = len(lowerCamelCase ) snake_case__ : Tuple = N // n snake_case__ : List[Any] = logits.reshape((num_sequences, n, -1) ) if multi_label or len(lowerCamelCase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently snake_case__ : List[str] = self.entailment_id snake_case__ : Tuple = -1 if entailment_id == 0 else 0 snake_case__ : str = reshaped_outputs[..., [contradiction_id, entailment_id]] snake_case__ : List[str] = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase ) snake_case__ : str = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels snake_case__ : Dict = reshaped_outputs[..., self.entailment_id] snake_case__ : List[str] = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase ) snake_case__ : str = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
694
"""Simple TCP client that downloads a file from a local demo server."""
import socket


def _A():
    """Connect to a server on this host, send a greeting, and stream the
    server's reply into a file named ``Received_file``.

    The server is expected to listen on the local hostname at port 12312.
    No value is returned; the observable effect is the file written to disk.
    """
    # `with` guarantees the socket is closed even if connect/recv/write raises
    # (the original closed it only on the success path).
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        host = socket.gethostname()
        port = 12_312
        sock.connect((host, port))
        sock.send(b'''Hello server!''')
        with open('''Received_file''', '''wb''') as out_file:
            print('''File opened''')
            print('''Receiving data...''')
            while True:
                # 1024-byte chunks; an empty read means the peer closed.
                data = sock.recv(10_24)
                if not data:
                    break
                out_file.write(data)
        print('''Successfully received the file''')
    print('''Connection closed''')


if __name__ == "__main__":
    # Bug fix: the guard previously called `main()`, which is not defined
    # anywhere in this module (the function is named `_A`) -> NameError.
    _A()
694
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class lowerCAmelCase_ : """simple docstring""" def __init__( self :Tuple , lowerCamelCase__ :Optional[int] , ): UpperCamelCase__ :List[Any] = parent UpperCamelCase__ :str = 13 UpperCamelCase__ :Any = 7 UpperCamelCase__ :Optional[int] = 30 UpperCamelCase__ :List[str] = self.seq_length + self.mem_len UpperCamelCase__ :int = 15 UpperCamelCase__ :int = True UpperCamelCase__ :Any = True UpperCamelCase__ :Dict = 99 UpperCamelCase__ :int = [10, 50, 80] UpperCamelCase__ :List[Any] = 32 UpperCamelCase__ :Tuple = 32 UpperCamelCase__ :Tuple = 4 UpperCamelCase__ :List[str] = 8 UpperCamelCase__ :Optional[int] = 1_28 UpperCamelCase__ :int = 2 UpperCamelCase__ :List[str] = 2 UpperCamelCase__ :str = None UpperCamelCase__ :str = 1 UpperCamelCase__ :Optional[int] = 0 UpperCamelCase__ :Union[str, Any] = 3 UpperCamelCase__ :List[Any] = self.vocab_size - 1 UpperCamelCase__ :str = 0.01 def __a ( self :int ): UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Union[str, Any] = None if self.use_labels: UpperCamelCase__ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase__ :Dict = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , 
d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __a ( self :Union[str, Any] ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def __a ( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Optional[int] = TFTransfoXLModel(lowerCAmelCase__ ) UpperCamelCase__ , UpperCamelCase__ :List[str] = model(lowerCAmelCase__ ).to_tuple() UpperCamelCase__ :List[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a} UpperCamelCase__ , UpperCamelCase__ :List[str] = model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __a ( self :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Any ): UpperCamelCase__ :Optional[Any] = TFTransfoXLLMHeadModel(lowerCAmelCase__ ) UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model(lowerCAmelCase__ ).to_tuple() UpperCamelCase__ :int = {"""input_ids""": input_ids_a, """labels""": lm_labels} UpperCamelCase__ , UpperCamelCase__ :Dict = model(lowerCAmelCase__ ).to_tuple() UpperCamelCase__ , UpperCamelCase__ :Dict = model([input_ids_a, mems_a] ).to_tuple() UpperCamelCase__ :Dict = {"""input_ids""": 
input_ids_a, """mems""": mems_a, """labels""": lm_labels} UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __a ( self :Optional[int] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple ): UpperCamelCase__ :Tuple = TFTransfoXLForSequenceClassification(lowerCAmelCase__ ) UpperCamelCase__ :str = model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self :Optional[Any] ): UpperCamelCase__ :List[str] = self.prepare_config_and_inputs() ((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :str = config_and_inputs UpperCamelCase__ :List[str] = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class lowerCAmelCase_ ( __a , __a , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _snake_case : Optional[int] = () if is_tf_available() else () _snake_case : int = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _snake_case : Tuple = False _snake_case : Tuple = False _snake_case : int = 
False _snake_case : str = False def __a ( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def __a ( self :List[Any] ): UpperCamelCase__ :List[Any] = TFTransfoXLModelTester(self ) UpperCamelCase__ :List[Any] = ConfigTester(self , config_class=lowerCAmelCase__ , d_embed=37 ) def __a ( self :List[Any] ): self.config_tester.run_common_tests() def __a ( self :Optional[Any] ): self.model_tester.set_seed() UpperCamelCase__ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase__ ) def __a ( self :str ): self.model_tester.set_seed() UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase__ ) def __a ( self :int ): UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase__ ) def __a ( self :int ): UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ :Dict = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: UpperCamelCase__ :Optional[int] = model_class(lowerCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: UpperCamelCase__ :int = model.get_output_embeddings() assert isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) UpperCamelCase__ :Optional[Any] = model.get_bias() assert name is None else: UpperCamelCase__ :Dict = 
model.get_output_embeddings() assert x is None UpperCamelCase__ :Optional[Any] = model.get_bias() assert name is None def __a ( self :List[str] ): # TODO JP: Make TransfoXL XLA compliant pass @slow def __a ( self :Optional[int] ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ :str = TFTransfoXLModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def __a ( self :Optional[Any] ): pass @require_tf class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def __a ( self :Any ): UpperCamelCase__ :Dict = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off UpperCamelCase__ :Any = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> UpperCamelCase__ :Union[str, Any] = model.generate(lowerCAmelCase__ , max_length=2_00 , do_sample=lowerCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
45
def __snake_case ( __magic_name__ ): '''simple docstring''' lowercase = set() # To detect a back edge, keep track of vertices currently in the recursion stack lowercase = set() return any( node not in visited and depth_first_search(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) for node in graph ) def __snake_case ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): '''simple docstring''' visited.add(__magic_name__ ) rec_stk.add(__magic_name__ ) for node in graph[vertex]: if node not in visited: if depth_first_search(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__magic_name__ ) return False if __name__ == "__main__": from doctest import testmod testmod()
441
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase , lowercase , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = StableDiffusionSAGPipeline SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = TEXT_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ = False def __A ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) UpperCAmelCase_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , ) torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) UpperCAmelCase_ = 
CLIPTextModel(lowerCAmelCase ) UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __A ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any]=0 ): '''simple docstring''' if str(lowerCAmelCase ).startswith("mps" ): UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase ) else: UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) UpperCAmelCase_ = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def __A ( self : Any ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __A ( self : int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" ) UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) UpperCAmelCase_ = "." 
UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sag_pipe( [prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __A ( self : Dict ): '''simple docstring''' UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) UpperCAmelCase_ = "." UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sag_pipe( [prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" ) UpperCAmelCase_ = output.images UpperCAmelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) UpperCAmelCase_ = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __A ( self : List[str] ): '''simple docstring''' UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" ) UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase ) sag_pipe.set_progress_bar_config(disable=lowerCAmelCase ) UpperCAmelCase_ = "." UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sag_pipe( [prompt] , width=768 , height=512 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape == (1, 512, 768, 3)
714
from .imports import is_tqdm_available

if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def __lowerCAmelCase(main_process_only=True, *args, **kwargs):
    """Wrapper around ``tqdm.auto.tqdm`` that, by default, shows a
    progress bar only on the main (local rank 0) process.

    :param main_process_only: when ``True``, disable the bar on every
        process whose local rank is not 0.
    :param args: positional arguments forwarded to ``tqdm``.
    :param kwargs: keyword arguments forwarded to ``tqdm``.
    :raises ImportError: if ``tqdm`` is not installed.

    BUG FIX: the garbled original declared ``(A=True, *A, **A)`` —
    duplicate parameter names are a SyntaxError — and passed an
    ambiguous ``disable=A``; restored to the intended contract.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # NOTE(review): the garbled source compared ``== 0`` here, which
        # would hide the bar on the main process; disabling on non-zero
        # ranks (``!= 0``) matches the function's stated intent — confirm.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
268
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= tempfile.mkdtemp() # fmt: off lowercase__ : int= ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on lowercase__ : Any= dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) lowercase__ : List[str]= ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] lowercase__ : Optional[int]= {"unk_token": "<unk>"} lowercase__ : Optional[int]= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : str= os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) lowercase__ : Optional[int]= { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } lowercase__ : List[str]= os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case__ , snake_case__ ) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' 
return CLIPTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase_ ( self , **snake_case__ ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ : Tuple= [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= self.get_tokenizer() lowercase__ : int= self.get_rust_tokenizer() lowercase__ : Dict= self.get_image_processor() lowercase__ : List[str]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ : List[Any]= CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) lowercase__ : int= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ : str= CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case__ ) 
self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[int]= CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Optional[int]= self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ : Tuple= self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 ) lowercase__ : Optional[Any]= CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= self.get_image_processor() lowercase__ : Optional[Any]= self.get_tokenizer() lowercase__ : Dict= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowercase__ : Union[str, Any]= self.prepare_image_inputs() lowercase__ : Optional[Any]= image_processor(snake_case__ , return_tensors="np" ) lowercase__ : str= processor(images=snake_case__ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= self.get_image_processor() lowercase__ : Dict= self.get_tokenizer() lowercase__ : List[Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowercase__ : List[str]= "lower newer" lowercase__ : Optional[Any]= processor(text=snake_case__ ) lowercase__ : Dict= tokenizer(snake_case__ ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Union[str, Any]= self.get_image_processor() lowercase__ : List[str]= self.get_tokenizer() lowercase__ : str= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowercase__ : Dict= "lower newer" lowercase__ : Optional[Any]= self.prepare_image_inputs() lowercase__ : Optional[Any]= processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= self.get_image_processor() lowercase__ : Tuple= self.get_tokenizer() lowercase__ : Optional[Any]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowercase__ : List[Any]= self.prepare_image_inputs() lowercase__ : Dict= self.prepare_image_inputs() lowercase__ : List[Any]= processor(images=snake_case__ , visual_prompt=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : List[Any]= self.get_image_processor() lowercase__ : Optional[Any]= self.get_tokenizer() lowercase__ : List[str]= CLIPSegProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) lowercase__ : List[Any]= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : Union[str, Any]= processor.batch_decode(snake_case__ ) lowercase__ : List[str]= tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ )
218
"""simple docstring""" def lowercase__(A ) ->str: """simple docstring""" if isinstance(A , A ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A , A ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" lowercase__ : Tuple= False if num < 0: lowercase__ : str= True lowercase__ : Dict= -num lowercase__ : list[int]= [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A ) for e in binary ) return "0b" + "".join(str(A ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
218
1
def _snake_case ( __snake_case , __snake_case = False ): if not isinstance(__snake_case , __snake_case ): _UpperCamelCase = f"""Expected string as input, found {type(__snake_case )}""" raise ValueError(__snake_case ) if not isinstance(__snake_case , __snake_case ): _UpperCamelCase = f"""Expected boolean as use_pascal parameter, found {type(__snake_case )}""" raise ValueError(__snake_case ) _UpperCamelCase = input_str.split('''_''' ) _UpperCamelCase = 0 if use_pascal else 1 _UpperCamelCase = words[start_index:] _UpperCamelCase = [word[0].upper() + word[1:] for word in words_to_capitalize] _UpperCamelCase = '''''' if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
711
def _snake_case ( __snake_case ): if not isinstance(__snake_case , __snake_case ): raise TypeError('''Input value must be an \'int\' type''' ) _UpperCamelCase = 0 while number: position += 1 number >>= 1 return position if __name__ == "__main__": import doctest doctest.testmod()
71
0
# -----------------------------------------------------------------------------
# NOTE(review): this span is a whitespace-collapsed copy of a Flax RoBERTa
# model-test module (a model-tester class plus a FlaxModelTesterMixin test
# case). Each original physical line was flattened onto the lines below, so
# this is NOT valid Python as written; it is preserved byte-for-byte pending
# recovery of the original formatting. Identifiers were machine-obfuscated
# (`lowercase__`, `_snake_case`, `A__`), so distinct variables share a single
# name — do not trust names for data flow. Presumably requires the
# transformers/flax test harness (`...test_modeling_flax_common`) to run;
# TODO confirm against the upstream transformers repository before editing.
# -----------------------------------------------------------------------------
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _snake_case ( unittest.TestCase): def __init__( self : List[Any], __lowercase : str, __lowercase : Optional[Any]=13, __lowercase : int=7, __lowercase : List[Any]=True, __lowercase : Tuple=True, __lowercase : Tuple=True, __lowercase : int=True, __lowercase : List[Any]=99, __lowercase : Optional[int]=32, __lowercase : str=5, __lowercase : str=4, __lowercase : Union[str, Any]=37, __lowercase : List[Any]="gelu", __lowercase : Any=0.1, __lowercase : Dict=0.1, __lowercase : Any=512, __lowercase : List[str]=16, __lowercase : List[Any]=2, __lowercase : List[str]=0.02, __lowercase : Optional[Any]=4, ): lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_choices def A__ ( self : List[str] ): lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowercase__ = RobertaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=__lowercase, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def A__ ( self : Dict ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def A__ ( self : List[Any] ): lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = True lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase__ = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _snake_case ( lowercase__ , unittest.TestCase): UpperCamelCase__ : str =True UpperCamelCase__ : Optional[int] =( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def A__ ( self : List[Any] ): lowercase__ = FlaxRobertaModelTester(self ) @slow def A__ ( self : Tuple ): for model_class_name in self.all_model_classes: lowercase__ = model_class_name.from_pretrained("roberta-base",
from_pt=__lowercase ) lowercase__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowercase )
413
# -----------------------------------------------------------------------------
# NOTE(review): this span is a whitespace-collapsed copy of a DPT-style image
# processor module (a resize-output-size helper plus a BaseImageProcessor
# subclass with resize/rescale/normalize/preprocess methods and a semantic-
# segmentation post-processor). The original physical lines were flattened,
# so the code below is NOT valid Python as written; it is preserved
# byte-for-byte pending recovery of the original formatting. Identifiers were
# machine-obfuscated (`lowercase__`, `SCREAMING_SNAKE_CASE_`, `A__`), so
# distinct variables share one name — do not trust names for data flow.
# Presumably mirrors transformers' DPT image processor; TODO confirm against
# the upstream repository before editing. Inline `#` comments mid-line below
# additionally comment out the remainder of their flattened line.
# -----------------------------------------------------------------------------
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): def constraint_to_multiple_of(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=None ): lowercase__ = round(val / multiple ) * multiple if max_val is not None and x > max_val: lowercase__ = math.floor(val / multiple ) * multiple if x < min_val: lowercase__ = math.ceil(val / multiple ) * multiple return x lowercase__ = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else output_size lowercase__ , lowercase__ = get_image_size(SCREAMING_SNAKE_CASE_ ) lowercase__ , lowercase__ = output_size # determine new height and width lowercase__ = output_height / input_height lowercase__ = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowercase__ = scale_width else: # fit height lowercase__ = scale_height lowercase__ = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE_ ) lowercase__ = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE_ ) return (new_height, new_width) class _snake_case ( lowercase__): UpperCamelCase__ : Tuple =["""pixel_values"""]
def __init__( self : Any, __lowercase : bool = True, __lowercase : Dict[str, int] = None, __lowercase : PILImageResampling = PILImageResampling.BILINEAR, __lowercase : bool = False, __lowercase : int = 1, __lowercase : bool = True, __lowercase : Union[int, float] = 1 / 255, __lowercase : bool = True, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, **__lowercase : List[Any], ): super().__init__(**__lowercase ) lowercase__ = size if size is not None else {"height": 384, "width": 384} lowercase__ = get_size_dict(__lowercase ) lowercase__ = do_resize lowercase__ = size lowercase__ = keep_aspect_ratio lowercase__ = ensure_multiple_of lowercase__ = resample lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def A__ ( self : List[Any], __lowercase : np.ndarray, __lowercase : Dict[str, int], __lowercase : bool = False, __lowercase : int = 1, __lowercase : PILImageResampling = PILImageResampling.BICUBIC, __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Union[str, Any], ): lowercase__ = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'.
Got {size.keys()}''' ) lowercase__ = get_resize_output_image_size( __lowercase, output_size=(size["height"], size["width"]), keep_aspect_ratio=__lowercase, multiple=__lowercase, ) return resize(__lowercase, size=__lowercase, resample=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : str, __lowercase : np.ndarray, __lowercase : Union[int, float], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : List[Any], ): return rescale(__lowercase, scale=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : Any, __lowercase : np.ndarray, __lowercase : Union[float, List[float]], __lowercase : Union[float, List[float]], __lowercase : Optional[Union[str, ChannelDimension]] = None, **__lowercase : Optional[Any], ): return normalize(__lowercase, mean=__lowercase, std=__lowercase, data_format=__lowercase, **__lowercase ) def A__ ( self : List[str], __lowercase : ImageInput, __lowercase : bool = None, __lowercase : int = None, __lowercase : bool = None, __lowercase : int = None, __lowercase : PILImageResampling = None, __lowercase : bool = None, __lowercase : float = None, __lowercase : bool = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[float, List[float]]] = None, __lowercase : Optional[Union[str, TensorType]] = None, __lowercase : ChannelDimension = ChannelDimension.FIRST, **__lowercase : Tuple, ): lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(__lowercase ) lowercase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowercase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowercase__ = resample if resample is not None else self.resample lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. lowercase__ = [to_numpy_array(__lowercase ) for image in images] if do_resize: lowercase__ = [self.resize(image=__lowercase, size=__lowercase, resample=__lowercase ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=__lowercase, scale=__lowercase ) for image in images] if do_normalize: lowercase__ = [self.normalize(image=__lowercase, mean=__lowercase, std=__lowercase ) for image in images] lowercase__ = [to_channel_dimension_format(__lowercase, __lowercase ) for image in images] lowercase__ = {"pixel_values": images} return BatchFeature(data=__lowercase, tensor_type=__lowercase ) def A__ ( self : int, __lowercase : Optional[Any], __lowercase : List[Tuple] = None ): lowercase__ = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__lowercase ) != len(__lowercase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__lowercase ): lowercase__ = target_sizes.numpy() lowercase__ = [] for idx in range(len(__lowercase ) ): lowercase__ =
torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=__lowercase ) lowercase__ = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__lowercase ) else: lowercase__ = logits.argmax(dim=1 ) lowercase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
413
1
# -----------------------------------------------------------------------------
# NOTE(review): this span is a whitespace-collapsed copy of a diffusers test
# module for StableDiffusionPanoramaPipeline (a fast PipelineTesterMixin test
# case plus a slow GPU integration test case). The original physical lines
# were flattened, so the code below is NOT valid Python as written; it is
# preserved byte-for-byte pending recovery of the original formatting.
# Identifiers were machine-obfuscated (`a`, `_A`, `SCREAMING_SNAKE_CASE_`,
# `UpperCamelCase`), so distinct variables share one name — do not trust
# names for data flow. Presumably mirrors the upstream diffusers test file;
# TODO confirm before editing. Requires torch/diffusers/transformers and
# (for the second class) CUDA. Inline `#` comments mid-line below
# additionally comment out the remainder of their flattened line.
# -----------------------------------------------------------------------------
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class UpperCamelCase ( _a , _a , unittest.TestCase ): """simple docstring""" A : Optional[int] = StableDiffusionPanoramaPipeline A : Any = TEXT_TO_IMAGE_PARAMS A : Any = TEXT_TO_IMAGE_BATCH_PARAMS A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" torch.manual_seed(0) a : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) a : Optional[int] = DDIMScheduler() torch.manual_seed(0) a : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) a : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) a : List[str] = CLIPTextModel(_A) a : Optional[Any] =
CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') a : Optional[int] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any]=0): """simple docstring""" a : int = torch.manual_seed(_A) a : List[Any] = { 'prompt': 'a photo of the dolomites', 'generator': generator, # Setting height and width to None to prevent OOMs on CPU. 'height': None, 'width': None, 'num_inference_steps': 1, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" a : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Union[str, Any] = self.get_dummy_components() a : Tuple = StableDiffusionPanoramaPipeline(**_A) a : Any = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) a : List[str] = self.get_dummy_inputs(_A) a : int = sd_pipe(**_A).images a : int = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a : Any = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int]): """simple docstring""" super().test_inference_batch_consistent(batch_sizes=[1, 2]) def SCREAMING_SNAKE_CASE_ ( self : List[str]): """simple docstring""" super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3) def SCREAMING_SNAKE_CASE_ ( self : Any): """simple docstring""" a : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Dict = self.get_dummy_components() a : Any = StableDiffusionPanoramaPipeline(**_A) a : List[Any] = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) a : Union[str, Any] = self.get_dummy_inputs(_A) a : Dict = 'french fries'
a : Optional[int] = sd_pipe(**_A , negative_prompt=_A) a : int = output.images a : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a : int = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" a : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator a : List[Any] = self.get_dummy_components() a : Dict = StableDiffusionPanoramaPipeline(**_A) a : Dict = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) a : Dict = self.get_dummy_inputs(_A) a : Union[str, Any] = sd_pipe(**_A , view_batch_size=2) a : Optional[int] = output.images a : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a : Tuple = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" a : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator a : str = self.get_dummy_components() a : Tuple = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear') a : Tuple = StableDiffusionPanoramaPipeline(**_A) a : str = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) a : Optional[Any] = self.get_dummy_inputs(_A) a : Tuple = sd_pipe(**_A).images a : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a : List[str] = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" a : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator a : Optional[Any] = self.get_dummy_components() a : int = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , skip_prk_steps=_A) a : Tuple = StableDiffusionPanoramaPipeline(**_A) a : List[Any] = sd_pipe.to(_A) sd_pipe.set_progress_bar_config(disable=_A) a : Any = self.get_dummy_inputs(_A) a : List[str] = sd_pipe(**_A).images a : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) a : List[str] = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE_ ( self : Any): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[Any]=0): """simple docstring""" a : Dict = torch.manual_seed(_A) a : List[Any] = { 'prompt': 'a photo of the dolomites', 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : int): """simple docstring""" a : List[Any] = 'stabilityai/stable-diffusion-2-base' a : int = DDIMScheduler.from_pretrained(_A , subfolder='scheduler') a : Any = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() a : str = self.get_inputs() a : List[Any] = pipe(**_A).images a : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) a : Union[str, Any] = np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ]) assert np.abs(expected_slice - image_slice).max() < 1e-2 def SCREAMING_SNAKE_CASE_ ( self : Dict): """simple docstring""" a : List[str] = StableDiffusionPanoramaPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-base' , safety_checker=_A) a :
List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() a : Optional[int] = self.get_inputs() a : Optional[int] = pipe(**_A).images a : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 2_0_4_8, 3) a : Optional[Any] = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]): """simple docstring""" a : Union[str, Any] = 0 def callback_fn(UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int) -> None: a : Optional[Any] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: a : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) a : Optional[int] = latents[0, -3:, -3:, -1] a : Tuple = np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, -0.37_82_31_64, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: a : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 2_5_6) a : List[str] = latents[0, -3:, -3:, -1] a : List[Any] = np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 a : int = False a : Union[str, Any] = 'stabilityai/stable-diffusion-2-base' a : List[str] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler') a : Dict = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A) a : List[Any] = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing() a : str = self.get_inputs() pipe(**_A , callback=_A , callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def
SCREAMING_SNAKE_CASE_ ( self : List[Any]): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() a : Dict = 'stabilityai/stable-diffusion-2-base' a : Optional[int] = DDIMScheduler.from_pretrained(_A , subfolder='scheduler') a : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(_A , scheduler=_A , safety_checker=_A) a : Union[str, Any] = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() a : Optional[Any] = self.get_inputs() a : Union[str, Any] = pipe(**_A) a : str = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 1_0**9
714
"""Project Euler problem 94: https://projecteuler.net/problem=94

Sum the perimeters of all "almost equilateral" triangles (sides b, b, b±1)
with integral side lengths and integral area whose perimeter does not exceed
a given limit.
"""


def SCREAMING_SNAKE_CASE__(snake_case: int = 10**9) -> int:
    """Return the sum of qualifying perimeters not exceeding ``snake_case``.

    Reconstructed from an obfuscated original in which all five working
    variables had been collapsed onto a single name; the recurrence below
    regenerates the known perimeter sequence 16, 50, 196, 722, 2704, ...

    >>> SCREAMING_SNAKE_CASE__(100)
    66
    """
    max_perimeter = snake_case
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter  # first pass adds the harmless seed 0
        # Advance the linear recurrence generating the triangle side lengths.
        prev_value += 2 * value
        value += prev_value
        # The alternating +2 / -2 distinguishes the (b, b, b+1) and
        # (b, b, b-1) families of almost equilateral triangles.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    # The original printed an undefined name `solution`; call the real function.
    print(f"{SCREAMING_SNAKE_CASE__() = }")
610
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _lowerCamelCase ( __lowerCamelCase ) -> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = 384 if "tiny" in model_name: UpperCAmelCase__ : Dict = [3, 3, 9, 3] UpperCAmelCase__ : int = [96, 192, 384, 768] if "small" in model_name: UpperCAmelCase__ : Optional[int] = [3, 3, 27, 3] UpperCAmelCase__ : Dict = [96, 192, 384, 768] if "base" in model_name: UpperCAmelCase__ : str = [3, 3, 27, 3] UpperCAmelCase__ : Optional[Any] = [128, 256, 512, 1024] UpperCAmelCase__ : int = 512 if "large" in model_name: UpperCAmelCase__ : List[str] = [3, 3, 27, 3] UpperCAmelCase__ : Tuple = [192, 384, 768, 1536] UpperCAmelCase__ : Any = 768 if "xlarge" in model_name: UpperCAmelCase__ : int = [3, 3, 27, 3] UpperCAmelCase__ : int = [256, 512, 1024, 2048] UpperCAmelCase__ : int = 1024 # set label information UpperCAmelCase__ : Tuple = 150 UpperCAmelCase__ : int = """huggingface/label-files""" UpperCAmelCase__ : Tuple = """ade20k-id2label.json""" UpperCAmelCase__ : int = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase__ : Any = {int(__lowerCamelCase ): v for k, v in idalabel.items()} UpperCAmelCase__ : Dict = {v: k for k, v in idalabel.items()} UpperCAmelCase__ : Optional[int] = ConvNextConfig( depths=__lowerCamelCase , hidden_sizes=__lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) UpperCAmelCase__ : str = UperNetConfig( backbone_config=__lowerCamelCase , auxiliary_in_channels=__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase , ) return config def _lowerCamelCase ( __lowerCamelCase ) -> List[str]: '''simple docstring''' UpperCAmelCase__ : Dict = [] # fmt: off # stem 
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") ) rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") ) rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") ) rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") ) rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") ) if i > 0: rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") ) rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") ) 
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") ) rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") ) rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> str: '''simple docstring''' UpperCAmelCase__ : str = dct.pop(__lowerCamelCase ) UpperCAmelCase__ : Union[str, Any] = val def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = { """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""", """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""", """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""", """upernet-convnext-large""": 
"""https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""", """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""", } UpperCAmelCase__ : List[Any] = model_name_to_url[model_name] UpperCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(__lowerCamelCase , map_location="""cpu""" )["""state_dict"""] UpperCAmelCase__ : Dict = get_upernet_config(__lowerCamelCase ) UpperCAmelCase__ : int = UperNetForSemanticSegmentation(__lowerCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): UpperCAmelCase__ : Optional[Any] = state_dict.pop(__lowerCamelCase ) if "bn" in key: UpperCAmelCase__ : int = key.replace("""bn""" , """batch_norm""" ) UpperCAmelCase__ : str = val # rename keys UpperCAmelCase__ : Optional[Any] = create_rename_keys(__lowerCamelCase ) for src, dest in rename_keys: rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) # verify on image UpperCAmelCase__ : Tuple = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg""" UpperCAmelCase__ : Tuple = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" ) UpperCAmelCase__ : Dict = SegformerImageProcessor() UpperCAmelCase__ : Union[str, Any] = processor(__lowerCamelCase , return_tensors="""pt""" ).pixel_values with torch.no_grad(): UpperCAmelCase__ : Any = model(__lowerCamelCase ) if model_name == "upernet-convnext-tiny": UpperCAmelCase__ : Optional[Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ) elif model_name == "upernet-convnext-small": UpperCAmelCase__ : 
Optional[Any] = torch.tensor( [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] ) elif model_name == "upernet-convnext-base": UpperCAmelCase__ : int = torch.tensor( [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] ) elif model_name == "upernet-convnext-large": UpperCAmelCase__ : List[Any] = torch.tensor( [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] ) elif model_name == "upernet-convnext-xlarge": UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , __lowerCamelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(__lowerCamelCase ) print(F"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(__lowerCamelCase ) if push_to_hub: print(F"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(F"openmmlab/{model_name}" ) processor.push_to_hub(F"openmmlab/{model_name}" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-convnext-tiny""", type=str, choices=[f'''upernet-convnext-{size}''' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]], help="""Name of the ConvNext UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) SCREAMING_SNAKE_CASE__ : List[Any] = 
parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
79
"""simple docstring""" import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a : Optional[Any] = False a : str = False def __magic_name__ ( UpperCamelCase : Namespace ) -> Optional[int]: return TrainCommand(UpperCamelCase ) class lowercase(_lowercase ): @staticmethod def lowercase__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" a__ = parser.add_parser('train' , help='CLI tool to train a model on a task.' ) train_parser.add_argument( '--train_data' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , ) train_parser.add_argument( '--column_label' , type=__SCREAMING_SNAKE_CASE , default=0 , help='Column of the dataset csv file with example labels.' ) train_parser.add_argument( '--column_text' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Column of the dataset csv file with example texts.' ) train_parser.add_argument( '--column_id' , type=__SCREAMING_SNAKE_CASE , default=2 , help='Column of the dataset csv file with example ids.' ) train_parser.add_argument( '--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' ) train_parser.add_argument('--validation_data' , type=__SCREAMING_SNAKE_CASE , default='' , help='path to validation dataset.' ) train_parser.add_argument( '--validation_split' , type=__SCREAMING_SNAKE_CASE , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' 
, ) train_parser.add_argument('--output' , type=__SCREAMING_SNAKE_CASE , default='./' , help='path to saved the trained model.' ) train_parser.add_argument( '--task' , type=__SCREAMING_SNAKE_CASE , default='text_classification' , help='Task to train the model on.' ) train_parser.add_argument( '--model' , type=__SCREAMING_SNAKE_CASE , default='bert-base-uncased' , help='Model\'s name or path to stored model.' ) train_parser.add_argument('--train_batch_size' , type=__SCREAMING_SNAKE_CASE , default=3_2 , help='Batch size for training.' ) train_parser.add_argument('--valid_batch_size' , type=__SCREAMING_SNAKE_CASE , default=6_4 , help='Batch size for validation.' ) train_parser.add_argument('--learning_rate' , type=__SCREAMING_SNAKE_CASE , default=3e-5 , help='Learning rate.' ) train_parser.add_argument('--adam_epsilon' , type=__SCREAMING_SNAKE_CASE , default=1e-08 , help='Epsilon for Adam optimizer.' ) train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE ) def __init__( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" a__ = logging.get_logger('transformers-cli/training' ) a__ = 'tf' if is_tf_available() else 'torch' os.makedirs(args.output , exist_ok=__SCREAMING_SNAKE_CASE ) a__ = args.output a__ = args.column_label a__ = args.column_text a__ = args.column_id self.logger.info(f'Loading {args.task} pipeline for {args.model}' ) if args.task == "text_classification": a__ = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(f'Loading dataset from {args.train_data}' ) a__ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) a__ = None if args.validation_data: self.logger.info(f'Loading validation dataset from {args.validation_data}' ) a__ = Processor.create_from_csv( 
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) a__ = args.validation_split a__ = args.train_batch_size a__ = args.valid_batch_size a__ = args.learning_rate a__ = args.adam_epsilon def lowercase__ ( self ) -> Tuple: """simple docstring""" if self.framework == "tf": return self.run_tf() return self.run_torch() def lowercase__ ( self ) -> Any: """simple docstring""" raise NotImplementedError def lowercase__ ( self ) -> Dict: """simple docstring""" self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
273
0
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __lowerCamelCase = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] __lowerCamelCase = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] __lowerCamelCase = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) __lowerCamelCase = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) __lowerCamelCase = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Any: for tf_name, hf_name in patterns: A_ = k.replace(__A, __A ) return k def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int: A_ = BigBirdPegasusConfig(**__A ) A_ = BigBirdPegasusForConditionalGeneration(__A ) A_ = torch_model.state_dict() A_ = {} # separating decoder weights A_ = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} A_ = {k: tf_weights[k] for k in tf_weights if not 
k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items(), """tf -> hf conversion""" ): A_ = [k.endswith(__A ) for ending in KEYS_TO_IGNORE] if any(__A ): continue A_ = DECODER_PATTERNS A_ = rename_state_dict_key(__A, __A ) if new_k not in state_dict: raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): A_ = v.T A_ = torch.from_numpy(__A ) assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' for k, v in tqdm(remaining_weights.items(), """tf -> hf conversion""" ): A_ = [k.endswith(__A ) for ending in KEYS_TO_IGNORE] if any(__A ): continue A_ = REMAINING_PATTERNS A_ = rename_state_dict_key(__A, __A ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): A_ = v.T A_ = torch.from_numpy(__A ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'''{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}''' A_ = mapping["""model.embed_positions.weight"""] A_ = mapping.pop("""model.embed_positions.weight""" ) A_ = torch_model.load_state_dict(__A, strict=__A ) A_ = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}''' assert extra == [], F'''no matches found for the following tf keys {extra}''' return torch_model def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int: A_ = tf.train.list_variables(__A ) A_ = {} A_ = ["""global_step"""] for name, shape in tqdm(__A, desc="""converting tf checkpoint to dict""" ): A_ = 
any(pat in name for pat in ignore_name ) if skip_key: continue A_ = tf.train.load_variable(__A, __A ) A_ = array return tf_weights def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> str: A_ = get_tf_weights_as_numpy(__A ) A_ = convert_bigbird_pegasus(__A, __A ) torch_model.save_pretrained(__A ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') __lowerCamelCase = parser.parse_args() __lowerCamelCase = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
715
'''simple docstring''' import argparse import json import os from collections import OrderedDict import torch from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) -> List[Any]: # Load configuration defined in the metadata file with open(UpperCAmelCase__ ) as metadata_file: A_ = json.load(UpperCAmelCase__ ) A_ = LukeConfig(use_entity_aware_attention=UpperCAmelCase__, **metadata["""model_config"""] ) # Load in the weights from the checkpoint_path A_ = torch.load(UpperCAmelCase__, map_location="""cpu""" )["""module"""] # Load the entity vocab file A_ = load_original_entity_vocab(UpperCAmelCase__ ) # add an entry for [MASK2] A_ = max(entity_vocab.values() ) + 1 config.entity_vocab_size += 1 A_ = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] ) # Add special tokens to the token vocabulary for downstream tasks A_ = AddedToken("""<ent>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) A_ = AddedToken("""<ent2>""", lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """r""" ) as f: A_ = json.load(UpperCAmelCase__ ) A_ = """MLukeTokenizer""" with open(os.path.join(UpperCAmelCase__, """tokenizer_config.json""" ), """w""" ) as f: json.dump(UpperCAmelCase__, UpperCAmelCase__ ) with open(os.path.join(UpperCAmelCase__, MLukeTokenizer.vocab_files_names["""entity_vocab_file"""] ), """w""" ) as f: json.dump(UpperCAmelCase__, UpperCAmelCase__ ) A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ ) # Initialize the 
embeddings of the special tokens A_ = tokenizer.convert_tokens_to_ids(["""@"""] )[0] A_ = tokenizer.convert_tokens_to_ids(["""#"""] )[0] A_ = state_dict["""embeddings.word_embeddings.weight"""] A_ = word_emb[ent_init_index].unsqueeze(0 ) A_ = word_emb[enta_init_index].unsqueeze(0 ) A_ = torch.cat([word_emb, ent_emb, enta_emb] ) # add special tokens for 'entity_predictions.bias' for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]: A_ = state_dict[bias_name] A_ = decoder_bias[ent_init_index].unsqueeze(0 ) A_ = decoder_bias[enta_init_index].unsqueeze(0 ) A_ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A_ = F'''encoder.layer.{layer_index}.attention.self.''' A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] A_ = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A_ = state_dict["""entity_embeddings.entity_embeddings.weight"""] A_ = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0 ) A_ = torch.cat([entity_emb, entity_mask_emb] ) # add [MASK2] for 'entity_predictions.bias' A_ = state_dict["""entity_predictions.bias"""] A_ = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0 ) A_ = torch.cat([entity_prediction_bias, entity_mask_bias] ) A_ = LukeForMaskedLM(config=UpperCAmelCase__ ).eval() state_dict.pop("""entity_predictions.decoder.weight""" ) state_dict.pop("""lm_head.decoder.weight""" ) state_dict.pop("""lm_head.decoder.bias""" ) A_ = OrderedDict() for key, value in state_dict.items(): if not (key.startswith("""lm_head""" ) or key.startswith("""entity_predictions""" )): A_ = state_dict[key] else: A_ = state_dict[key] A_ , A_ = model.load_state_dict(UpperCAmelCase__, strict=UpperCAmelCase__ ) if set(UpperCAmelCase__ ) != 
{"luke.embeddings.position_ids"}: raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' ) if set(UpperCAmelCase__ ) != { "lm_head.decoder.weight", "lm_head.decoder.bias", "entity_predictions.decoder.weight", }: raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' ) model.tie_weights() assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all() assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all() # Check outputs A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__, task="""entity_classification""" ) A_ = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).""" A_ = (0, 9) A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" ) A_ = model(**UpperCAmelCase__ ) # Verify word hidden states if model_size == "large": raise NotImplementedError else: # base A_ = torch.Size((1, 33, 7_68) ) A_ = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": raise NotImplementedError else: # base A_ = torch.Size((1, 1, 7_68) ) A_ = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], UpperCAmelCase__, atol=1e-4 ): raise ValueError # Verify masked word/entity prediction A_ = MLukeTokenizer.from_pretrained(UpperCAmelCase__ ) A_ = 
"""Tokyo is the capital of <mask>.""" A_ = (24, 30) A_ = tokenizer(UpperCAmelCase__, entity_spans=[span], return_tensors="""pt""" ) A_ = model(**UpperCAmelCase__ ) A_ = encoding["""input_ids"""][0].tolist() A_ = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>""" ) ) A_ = outputs.logits[0][mask_position_id].argmax(dim=-1 ) assert "Japan" == tokenizer.decode(UpperCAmelCase__ ) A_ = outputs.entity_logits[0][0].argmax().item() A_ = [ entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id ] assert [e for e in multilingual_predicted_entities if e.startswith("""en:""" )][0] == "en:Japan" # Finally, save our PyTorch model and tokenizer print("""Saving PyTorch model to {}""".format(UpperCAmelCase__ ) ) model.save_pretrained(UpperCAmelCase__ ) def UpperCAmelCase__ ( UpperCAmelCase__ ) -> int: A_ = ["""[MASK]""", """[PAD]""", """[UNK]"""] A_ = [json.loads(UpperCAmelCase__ ) for line in open(UpperCAmelCase__ )] A_ = {} for entry in data: A_ = entry["""id"""] for entity_name, language in entry["entities"]: if entity_name in SPECIAL_TOKENS: A_ = entity_id break A_ = F'''{language}:{entity_name}''' A_ = entity_id return new_mapping if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) __lowerCamelCase = 
parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
667
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : Optional[int] = { "configuration_chinese_clip": [ "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "ChineseCLIPConfig", "ChineseCLIPOnnxConfig", "ChineseCLIPTextConfig", "ChineseCLIPVisionConfig", ], "processing_chinese_clip": ["ChineseCLIPProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Dict = ["ChineseCLIPFeatureExtractor"] A_ : List[str] = ["ChineseCLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : Optional[Any] = [ "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "ChineseCLIPModel", "ChineseCLIPPreTrainedModel", "ChineseCLIPTextModel", "ChineseCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_chinese_clip import ( CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, ChineseCLIPConfig, ChineseCLIPOnnxConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig, ) from .processing_chinese_clip import ChineseCLIPProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_chinese_clip import ( CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, ChineseCLIPModel, ChineseCLIPPreTrainedModel, ChineseCLIPTextModel, ChineseCLIPVisionModel, ) else: import sys A_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
196
"""simple docstring""" import numpy as np def A ( snake_case__ ): '''simple docstring''' return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
196
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase = { 'configuration_longformer': [ 'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongformerConfig', 'LongformerOnnxConfig', ], 'tokenization_longformer': ['LongformerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['LongformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel', 'LongformerSelfAttention', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel', 'TFLongformerSelfAttention', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, 
LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
716
import requests from bsa import BeautifulSoup def _a ( SCREAMING_SNAKE_CASE = "AAPL" ): """simple docstring""" lowercase__ = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' lowercase__ = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE ).text , '''html.parser''' ) lowercase__ = '''My(6px) Pos(r) smartphone_Mt(6px)''' return soup.find('''div''' , class_=class_ ).find('''span''' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
429
0
def lowercase_ (A : str ): return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(A ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
478
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training") # TF training parameters a_ :List[str] = False a_ :Any = False def lowercase_ (A : Namespace ): return TrainCommand(A ) class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" @staticmethod def lowercase_ ( _snake_case : ArgumentParser ) ->Dict: snake_case__ : List[str] = parser.add_parser('train', help='CLI tool to train a model on a task.' ) train_parser.add_argument( '--train_data', type=_snake_case, required=_snake_case, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', ) train_parser.add_argument( '--column_label', type=_snake_case, default=0, help='Column of the dataset csv file with example labels.' ) train_parser.add_argument( '--column_text', type=_snake_case, default=1, help='Column of the dataset csv file with example texts.' ) train_parser.add_argument( '--column_id', type=_snake_case, default=2, help='Column of the dataset csv file with example ids.' ) train_parser.add_argument( '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' ) train_parser.add_argument('--validation_data', type=_snake_case, default='', help='path to validation dataset.' ) train_parser.add_argument( '--validation_split', type=_snake_case, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', ) train_parser.add_argument('--output', type=_snake_case, default='./', help='path to saved the trained model.' 
) train_parser.add_argument( '--task', type=_snake_case, default='text_classification', help='Task to train the model on.' ) train_parser.add_argument( '--model', type=_snake_case, default='bert-base-uncased', help='Model\'s name or path to stored model.' ) train_parser.add_argument('--train_batch_size', type=_snake_case, default=3_2, help='Batch size for training.' ) train_parser.add_argument('--valid_batch_size', type=_snake_case, default=6_4, help='Batch size for validation.' ) train_parser.add_argument('--learning_rate', type=_snake_case, default=3e-5, help='Learning rate.' ) train_parser.add_argument('--adam_epsilon', type=_snake_case, default=1e-08, help='Epsilon for Adam optimizer.' ) train_parser.set_defaults(func=_snake_case ) def __init__( self : Optional[int], _snake_case : Namespace ) ->Union[str, Any]: snake_case__ : int = logging.get_logger('transformers-cli/training' ) snake_case__ : int = 'tf' if is_tf_available() else 'torch' os.makedirs(args.output, exist_ok=_snake_case ) snake_case__ : Any = args.output snake_case__ : Optional[int] = args.column_label snake_case__ : Tuple = args.column_text snake_case__ : Tuple = args.column_id self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' ) if args.task == "text_classification": snake_case__ : Dict = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F'''Loading dataset from {args.train_data}''' ) snake_case__ : List[str] = Processor.create_from_csv( args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, ) snake_case__ : str = None if args.validation_data: self.logger.info(F'''Loading validation dataset from {args.validation_data}''' ) snake_case__ : Optional[int] = Processor.create_from_csv( args.validation_data, column_label=args.column_label, 
column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, ) snake_case__ : List[str] = args.validation_split snake_case__ : List[str] = args.train_batch_size snake_case__ : str = args.valid_batch_size snake_case__ : int = args.learning_rate snake_case__ : List[Any] = args.adam_epsilon def lowercase_ ( self : Any ) ->Dict: if self.framework == "tf": return self.run_tf() return self.run_torch() def lowercase_ ( self : Dict ) ->List[Any]: raise NotImplementedError def lowercase_ ( self : str ) ->Optional[Any]: self.pipeline.fit( self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
478
1
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast _A = datasets.utils.logging.get_logger(__name__) @dataclass class _lowerCAmelCase ( datasets.BuilderConfig ): _lowercase =1_00_00 _lowercase =None _lowercase =None class _lowerCAmelCase ( datasets.ArrowBasedBuilder ): _lowercase =ParquetConfig def __a ( self ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def __a ( self , _UpperCamelCase ) -> Tuple: if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase_ = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_UpperCamelCase , (str, list, tuple) ): lowerCAmelCase_ = data_files if isinstance(_UpperCamelCase , _UpperCamelCase ): lowerCAmelCase_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase_ = [dl_manager.iter_files(_UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] lowerCAmelCase_ = [] for split_name, files in data_files.items(): if isinstance(_UpperCamelCase , _UpperCamelCase ): lowerCAmelCase_ = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase_ = [dl_manager.iter_files(_UpperCamelCase ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_UpperCamelCase ): with open(_UpperCamelCase , "rb" ) as f: lowerCAmelCase_ = datasets.Features.from_arrow_schema(pq.read_schema(_UpperCamelCase ) ) break splits.append(datasets.SplitGenerator(name=_UpperCamelCase , gen_kwargs={"files": files} ) ) return splits def __a ( self , _UpperCamelCase ) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested 
features with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase_ = table_cast(_UpperCamelCase , self.info.features.arrow_schema ) return pa_table def __a ( self , _UpperCamelCase ) -> Dict: lowerCAmelCase_ = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(_UpperCamelCase ) ): with open(_UpperCamelCase , "rb" ) as f: lowerCAmelCase_ = pq.ParquetFile(_UpperCamelCase ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): lowerCAmelCase_ = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"""{file_idx}_{batch_idx}""", self._cast_table(_UpperCamelCase ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(_UpperCamelCase )}: {e}""" ) raise
279
from __future__ import annotations _A = list[tuple[int, int]] _A = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _A = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class _lowerCAmelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple: lowerCAmelCase_ = pos_x lowerCAmelCase_ = pos_y lowerCAmelCase_ = (pos_y, pos_x) lowerCAmelCase_ = goal_x lowerCAmelCase_ = goal_y lowerCAmelCase_ = g_cost lowerCAmelCase_ = parent lowerCAmelCase_ = self.calculate_heuristic() def __a ( self ) -> float: lowerCAmelCase_ = abs(self.pos_x - self.goal_x ) lowerCAmelCase_ = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _UpperCamelCase ) -> bool: return self.f_cost < other.f_cost class _lowerCAmelCase : def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> Tuple: lowerCAmelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _UpperCamelCase ) lowerCAmelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , _UpperCamelCase ) lowerCAmelCase_ = [self.start] lowerCAmelCase_ = [] lowerCAmelCase_ = False def __a ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowerCAmelCase_ = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: lowerCAmelCase_ = True return self.retrace_path(_UpperCamelCase ) self.closed_nodes.append(_UpperCamelCase ) lowerCAmelCase_ = self.get_successors(_UpperCamelCase ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_UpperCamelCase ) else: # retrieve the best current path lowerCAmelCase_ = self.open_nodes.pop(self.open_nodes.index(_UpperCamelCase ) ) if child_node.g_cost < better_node.g_cost: 
self.open_nodes.append(_UpperCamelCase ) else: self.open_nodes.append(_UpperCamelCase ) if not self.reached: return [self.start.pos] return None def __a ( self , _UpperCamelCase ) -> list[Node]: lowerCAmelCase_ = [] for action in delta: lowerCAmelCase_ = parent.pos_x + action[1] lowerCAmelCase_ = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_UpperCamelCase ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _UpperCamelCase , _UpperCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _UpperCamelCase , ) ) return successors def __a ( self , _UpperCamelCase ) -> Path: lowerCAmelCase_ = node lowerCAmelCase_ = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowerCAmelCase_ = current_node.parent path.reverse() return path if __name__ == "__main__": _A = (0, 0) _A = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("------") _A = GreedyBestFirst(init, goal) _A = greedy_bf.search() if path: for pos_x, pos_y in path: _A = 2 for elem in grid: print(elem)
279
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. 
# New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## SCREAMING_SNAKE_CASE :Tuple = 16 SCREAMING_SNAKE_CASE :List[str] = 32 def UpperCAmelCase ( a_ , a_ = 1_6 ) -> Union[str, Any]: """simple docstring""" __A = AutoTokenizer.from_pretrained("bert-base-cased" ) __A = load_dataset("glue" , "mrpc" ) def tokenize_function(a_ ): # max_length=None => use the model max length (it's actually the default) __A = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=a_ , max_length=a_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __A = datasets.map( a_ , batched=a_ , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __A = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(a_ ): # On TPU it's best to pad everything to the same length or training will be very slow. __A = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __A = 1_6 elif accelerator.mixed_precision != "no": __A = 8 else: __A = None return tokenizer.pad( a_ , padding="longest" , max_length=a_ , pad_to_multiple_of=a_ , return_tensors="pt" , ) # Instantiate dataloaders. 
__A = DataLoader( tokenized_datasets["train"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) __A = DataLoader( tokenized_datasets["validation"] , shuffle=a_ , collate_fn=a_ , batch_size=a_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders SCREAMING_SNAKE_CASE :str = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , a_ ) == "1": __A = 2 # Initialize accelerator __A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __A = config["lr"] __A = int(config["num_epochs"] ) __A = int(config["seed"] ) __A = int(config["batch_size"] ) __A = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation __A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __A = batch_size // MAX_GPU_BATCH_SIZE __A = MAX_GPU_BATCH_SIZE set_seed(a_ ) __A , __A = get_dataloaders(a_ , a_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __A = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=a_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
__A = model.to(accelerator.device ) # Instantiate optimizer __A = AdamW(params=model.parameters() , lr=a_ ) # Instantiate scheduler __A = get_linear_schedule_with_warmup( optimizer=a_ , num_warmup_steps=1_0_0 , num_training_steps=(len(a_ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __A , __A , __A , __A , __A = accelerator.prepare( a_ , a_ , a_ , a_ , a_ ) # Now we train the model for epoch in range(a_ ): model.train() for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __A = model(**a_ ) __A = outputs.loss __A = loss / gradient_accumulation_steps accelerator.backward(a_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() __A = 0 for step, batch in enumerate(a_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __A = model(**a_ ) __A = outputs.logits.argmax(dim=-1 ) __A , __A = accelerator.gather((predictions, batch["labels"]) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(a_ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples __A = predictions[: len(eval_dataloader.dataset ) - samples_seen] __A = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=a_ , references=a_ , ) __A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , a_ ) def UpperCAmelCase ( ) -> Tuple: """simple docstring""" __A = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=a_ , default=a_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) __A = parser.parse_args() __A = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6} training_function(a_ , a_ ) if __name__ == "__main__": main()
55
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Any = { 'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "mra" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict=50_265 , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 , __SCREAMING_SNAKE_CASE : Any=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : str="absolute" , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : Optional[Any]="full" , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Dict=2 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Tuple: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = 
position_embedding_type __SCREAMING_SNAKE_CASE = block_per_row __SCREAMING_SNAKE_CASE = approx_mode __SCREAMING_SNAKE_CASE = initial_prior_first_n_blocks __SCREAMING_SNAKE_CASE = initial_prior_diagonal_n_blocks
627
0
import numpy as np from transformers import Pipeline def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]: __lowercase = np.max(SCREAMING_SNAKE_CASE , axis=-1 , keepdims=SCREAMING_SNAKE_CASE ) __lowercase = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=SCREAMING_SNAKE_CASE ) class A__ ( lowerCAmelCase__ ): def a__ ( self : Optional[int] , **_UpperCAmelCase : List[str] ) -> List[str]: """simple docstring""" __lowercase = {} if "second_text" in kwargs: __lowercase = kwargs['second_text'] return preprocess_kwargs, {}, {} def a__ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=None ) -> Tuple: """simple docstring""" return self.tokenizer(_UpperCAmelCase , text_pair=_UpperCAmelCase , return_tensors=self.framework ) def a__ ( self : Optional[int] , _UpperCAmelCase : int ) -> List[str]: """simple docstring""" return self.model(**_UpperCAmelCase ) def a__ ( self : Optional[int] , _UpperCAmelCase : int ) -> List[Any]: """simple docstring""" __lowercase = model_outputs.logits[0].numpy() __lowercase = softmax(_UpperCAmelCase ) __lowercase = np.argmax(_UpperCAmelCase ) __lowercase = self.model.config.idalabel[best_class] __lowercase = probabilities[best_class].item() __lowercase = logits.tolist() return {"label": label, "score": score, "logits": logits}
688
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version SCREAMING_SNAKE_CASE__ = get_logger(__name__) class A__ : lowerCAmelCase__ : Optional[int] = "dummy_data" lowerCAmelCase__ : str = "datasets" lowerCAmelCase__ : Dict = False def __init__( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[Version, str] , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[List[Callable]] = None , ) -> Union[str, Any]: """simple docstring""" __lowercase = 0 __lowercase = dataset_name __lowercase = cache_dir __lowercase = use_local_dummy_data __lowercase = config # download_callbacks take a single url as input __lowercase = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __lowercase = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __lowercase = str(_UpperCAmelCase ) # to be downloaded __lowercase = None __lowercase = None @property def a__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" if self._dummy_file is None: __lowercase = self.download_dummy_data() return self._dummy_file @property def a__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def a__ ( self : int ) -> Tuple: """simple docstring""" return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" 
__lowercase = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __lowercase = cached_path( _UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=_UpperCAmelCase , force_extract=_UpperCAmelCase ) return os.path.join(_UpperCAmelCase , self.dummy_file_name ) @property def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def a__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" if self._bucket_url is None: __lowercase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def a__ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Tuple ) -> Dict: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested __lowercase = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __lowercase = self.dummy_file_name # special case when data_url is a dict if isinstance(_UpperCAmelCase , _UpperCAmelCase ): return self.create_dummy_data_dict(_UpperCAmelCase , _UpperCAmelCase ) elif isinstance(_UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(_UpperCAmelCase , _UpperCAmelCase ) else: return self.create_dummy_data_single(_UpperCAmelCase , _UpperCAmelCase ) def a__ ( self : Optional[int] , _UpperCAmelCase : Tuple , *_UpperCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" return self.download_and_extract(_UpperCAmelCase ) def a__ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple 
docstring""" return self.download_and_extract(_UpperCAmelCase ) def a__ ( self : Dict , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str , **_UpperCAmelCase : str ) -> Optional[int]: """simple docstring""" return path def a__ ( self : str ) -> Union[str, Any]: """simple docstring""" return {} def a__ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> Tuple: """simple docstring""" __lowercase = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_UpperCAmelCase , _UpperCAmelCase ): for single_url in single_urls: download_callback(_UpperCAmelCase ) else: __lowercase = single_urls download_callback(_UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = [os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) for x in single_urls] else: __lowercase = single_urls __lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(Path(_UpperCAmelCase ).name ) ) __lowercase = value # make sure that values are unique if all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __lowercase = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" __lowercase = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __lowercase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , _UpperCAmelCase ) ) for url in data_url ) __lowercase = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) 
for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __lowercase = [data_url[0]] * len(_UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(_UpperCAmelCase ) return dummy_data_list def a__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(_UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __lowercase = os.path.join(_UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(_UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. 
return path_to_dummy_data def a__ ( self : List[str] ) -> Any: """simple docstring""" pass def a__ ( self : int ) -> str: """simple docstring""" pass def a__ ( self : Optional[int] , _UpperCAmelCase : List[Any] ) -> Any: """simple docstring""" def _iter_archive_members(_UpperCAmelCase : Optional[Any] ): # this preserves the order of the members inside the ZIP archive __lowercase = Path(self.dummy_file ).parent __lowercase = path.relative_to(_UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __lowercase = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(_UpperCAmelCase ) __lowercase = Path(_UpperCAmelCase ) __lowercase = _iter_archive_members(_UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(_UpperCAmelCase ).as_posix(), file_path.open('rb' ) def a__ ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> str: """simple docstring""" if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): __lowercase = [paths] for path in paths: if os.path.isfile(_UpperCAmelCase ): if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(_UpperCAmelCase ): if os.path.basename(_UpperCAmelCase ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(_UpperCAmelCase ): if filename.startswith(('.', '__') ): continue yield os.path.join(_UpperCAmelCase , _UpperCAmelCase )
688
1
import argparse
import os
import re


# Root of the package whose __init__.py files are checked.
# NOTE(review): the constant name was lost to obfuscation; the value 'src/transformers'
# was preserved, so the conventional name is restored here.
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")


def get_indent(line):
    """Return the leading whitespace of `line` ("" for empty/blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks that start at indentation `indent_level`.

    If `start_prompt` is given, everything before the first line starting with it
    becomes the first block; if `end_prompt` is given, splitting stops there and
    the remainder becomes the last block, so joining the result with "\n"
    reproduces `code`.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        # A non-empty line at the target indent marks a block boundary.
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                # The line closes the current block (e.g. a closing bracket).
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                # The line starts a fresh block.
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap `key` so comparisons are case-insensitive and ignore underscores."""

    def _inner(obj):
        return key(obj).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    """Sort `objects` isort-style: constants first, then classes, then functions.

    `key` optionally maps each object to the string used for classification.
    """

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the object names inside `[...]` sorted."""

    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of `file`.

    With `check_only=True`, returns True when the file would change (nothing is
    written); otherwise the sorted content is written back.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_TRANSFORMERS.

    Raises ValueError if `check_only` and at least one file would be rewritten.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
592
# Convert original DETR checkpoints (from torch hub) to the HuggingFace Transformers format.
#
# NOTE(review): an automated renaming pass collapsed distinct local names into
# `__snake_case`/`lowerCamelCase__`/`snake_case_`, while the bodies still read the
# original names (`model_name`, `config`, `rename_keys`, `state_dict`, ...).  Lines
# assigning to `__snake_case` therefore no longer bind the names they are meant to,
# and some were likely `state_dict[...] = ...` item-writes originally — the script
# is not runnable as-is.  Tokens are preserved; only layout and docs were changed.
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging


logging.set_verbosity_info()
snake_case_ = logging.get_logger(__name__)


def lowerCamelCase__ ( snake_case_ : Any ) -> Tuple:
    """Build a DetrConfig (ResNet-50/101 backbone) and report whether the model is panoptic."""
    # initialize config
    if "resnet-50" in model_name:
        __snake_case = ResNetConfig.from_pretrained('''microsoft/resnet-50''')
    elif "resnet-101" in model_name:
        __snake_case = ResNetConfig.from_pretrained('''microsoft/resnet-101''')
    else:
        raise ValueError('''Model name should include either resnet50 or resnet101''')

    __snake_case = DetrConfig(use_timm_backbone=snake_case_, backbone_config=snake_case_)

    # set label attributes
    __snake_case = '''panoptic''' in model_name
    if is_panoptic:
        __snake_case = 250
    else:
        __snake_case = 91
        __snake_case = '''huggingface/label-files'''
        __snake_case = '''coco-detection-id2label.json'''
        __snake_case = json.load(open(hf_hub_download(snake_case_, snake_case_, repo_type='''dataset'''), '''r'''))
        __snake_case = {int(snake_case_): v for k, v in idalabel.items()}
        __snake_case = idalabel
        __snake_case = {v: k for k, v in idalabel.items()}

    return config, is_panoptic


def lowerCamelCase__ ( snake_case_ : Dict ) -> Union[str, Any]:
    """List (old_key, new_key) rename pairs mapping original DETR weights to HF names."""
    # here we list all keys to be renamed (original name on the left, our name on the right)
    __snake_case = []

    # stem
    # fmt: off
    rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight'''))
    rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight'''))
    rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias'''))
    rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean'''))
    rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var'''))
    # stages
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            # shortcut
            if layer_idx == 0:
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
                    )
                )
            # 3 convs
            for i in range(3):
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
                    )
                )
                rename_keys.append(
                    (
                        f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
                        f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
                    )
                )
    # fmt: on

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (
                f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
                f"""encoder.layers.{i}.self_attn.out_proj.weight""",
            )
        )
        rename_keys.append(
            (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
        )
        rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
        rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
        rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
        rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
        rename_keys.append(
            (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
        )
        rename_keys.append(
            (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")
        )
        rename_keys.append(
            (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")
        )
        rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        rename_keys.append(
            (
                f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
                f"""decoder.layers.{i}.self_attn.out_proj.weight""",
            )
        )
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
        )
        rename_keys.append(
            (
                f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
                f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
            )
        )
        rename_keys.append(
            (
                f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
                f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
            )
        )
        rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
        rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
        rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
        rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
        )
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")
        )
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
        )
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
        )
        rename_keys.append(
            (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""")
        )
        rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('''input_proj.weight''', '''input_projection.weight'''),
            ('''input_proj.bias''', '''input_projection.bias'''),
            ('''query_embed.weight''', '''query_position_embeddings.weight'''),
            ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
            ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
            ('''class_embed.weight''', '''class_labels_classifier.weight'''),
            ('''class_embed.bias''', '''class_labels_classifier.bias'''),
            ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
            ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
            ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
            ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
            ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
            ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
        ]
    )

    return rename_keys


def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : int ) -> str:
    """Pop a key from the state dict and re-insert its value under the new name.

    NOTE(review): the second statement was presumably `state_dict[new] = val`
    before obfuscation — confirm against the upstream conversion script.
    """
    __snake_case = state_dict.pop(snake_case_)
    __snake_case = val


def lowerCamelCase__ ( snake_case_ : int , snake_case_ : List[Any]=False ) -> List[str]:
    """Split each fused in_proj weight/bias into separate q/k/v projections.

    NOTE(review): the `__snake_case = in_proj_weight[...]` lines were presumably
    `state_dict[f"...q/k/v_proj..."] = ...` item-writes before obfuscation; the
    target keys are not recoverable from this chunk.
    """
    __snake_case = ''''''
    if is_panoptic:
        __snake_case = '''detr.'''

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        __snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        __snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        __snake_case = in_proj_weight[:256, :]
        __snake_case = in_proj_bias[:256]
        __snake_case = in_proj_weight[256:512, :]
        __snake_case = in_proj_bias[256:512]
        __snake_case = in_proj_weight[-256:, :]
        __snake_case = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        __snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        __snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        __snake_case = in_proj_weight[:256, :]
        __snake_case = in_proj_bias[:256]
        __snake_case = in_proj_weight[256:512, :]
        __snake_case = in_proj_bias[256:512]
        __snake_case = in_proj_weight[-256:, :]
        __snake_case = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        __snake_case = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"""
        )
        __snake_case = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        __snake_case = in_proj_weight_cross_attn[:256, :]
        __snake_case = in_proj_bias_cross_attn[:256]
        __snake_case = in_proj_weight_cross_attn[256:512, :]
        __snake_case = in_proj_bias_cross_attn[256:512]
        __snake_case = in_proj_weight_cross_attn[-256:, :]
        __snake_case = in_proj_bias_cross_attn[-256:]


def lowerCamelCase__ ( ) -> int:
    """Download the standard COCO test image (two cats on a couch) as a PIL Image."""
    __snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    __snake_case = Image.open(requests.get(snake_case_, stream=snake_case_).raw)

    return im


@torch.no_grad()
def lowerCamelCase__ ( snake_case_ : List[Any] , snake_case_ : str=None , snake_case_ : Dict=False ) -> Dict:
    """Convert one DETR checkpoint: rename weights, verify outputs, optionally save/push."""
    __snake_case , __snake_case = get_detr_config(snake_case_)

    # load original model from torch hub
    __snake_case = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(f"""Converting model {model_name}...""")
    __snake_case = torch.hub.load('''facebookresearch/detr''', model_name_to_original_name[model_name], pretrained=snake_case_).eval()
    __snake_case = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(snake_case_):
        if is_panoptic:
            __snake_case = '''detr.''' + src
        rename_key(snake_case_, snake_case_, snake_case_)
    # query, key and value matrices need special treatment
    read_in_q_k_v(snake_case_, is_panoptic=snake_case_)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    __snake_case = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''')
                and not key.startswith('''class_labels_classifier''')
                and not key.startswith('''bbox_predictor''')
            ):
                __snake_case = state_dict.pop(snake_case_)
                __snake_case = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                __snake_case = state_dict.pop(snake_case_)
                __snake_case = val
            elif key.startswith('''bbox_attention''') or key.startswith('''mask_head'''):
                continue
            else:
                __snake_case = state_dict.pop(snake_case_)
                __snake_case = val
        else:
            if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
                __snake_case = state_dict.pop(snake_case_)
                __snake_case = val

    # finally, create HuggingFace model and load state dict
    __snake_case = DetrForSegmentation(snake_case_) if is_panoptic else DetrForObjectDetection(snake_case_)
    model.load_state_dict(snake_case_)
    model.eval()

    # verify our conversion on an image
    __snake_case = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    __snake_case = DetrImageProcessor(format=snake_case_)

    __snake_case = processor(images=prepare_img(), return_tensors='''pt''')
    __snake_case = encoding['''pixel_values''']

    __snake_case = detr(snake_case_)
    __snake_case = model(snake_case_)

    assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-3)
    assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-3)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(snake_case_).mkdir(exist_ok=snake_case_)
        model.save_pretrained(snake_case_)
        processor.save_pretrained(snake_case_)

    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''')
        model.push_to_hub(f"""nielsr/{model_name}""")
        processor.push_to_hub(f"""nielsr/{model_name}""")


if __name__ == "__main__":
    snake_case_ = argparse.ArgumentParser()

    parser.add_argument(
        '--model_name',
        default='detr-resnet-50',
        type=str,
        choices=['detr-resnet-50', 'detr-resnet-101'],
        help='Name of the DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    snake_case_ = parser.parse_args()
    convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
592
1
"""simple docstring""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def lowercase ( UpperCamelCase : Optional[Any] ): """simple docstring""" A__ : Dict =VideoMAEConfig() set_architecture_configs(UpperCamelCase , UpperCamelCase ) if "finetuned" not in model_name: A__ : Optional[int] =False if "finetuned" in model_name: A__ : Tuple ="huggingface/label-files" if "kinetics" in model_name: A__ : List[Any] =400 A__ : str ="kinetics400-id2label.json" elif "ssv2" in model_name: A__ : Optional[Any] =174 A__ : Optional[Any] ="something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." ) A__ : str =json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) ) A__ : List[Any] ={int(UpperCamelCase ): v for k, v in idalabel.items()} A__ : Optional[Any] =idalabel A__ : Union[str, Any] ={v: k for k, v in idalabel.items()} return config def lowercase ( UpperCamelCase : int , UpperCamelCase : List[Any] ): """simple docstring""" if "small" in model_name: A__ : Any =384 A__ : Tuple =1536 A__ : Dict =12 A__ : Optional[Any] =16 A__ : int =12 A__ : Tuple =3 A__ : Union[str, Any] =192 A__ : Optional[Any] =768 elif "large" in model_name: A__ : Optional[int] =1024 A__ : Optional[int] =4096 A__ : int =24 A__ : Tuple =16 A__ : int =12 A__ : Any =8 A__ : Optional[int] =512 A__ : List[str] =2048 elif "huge" in model_name: A__ : List[Any] =1280 A__ : int =5120 A__ : List[str] =32 A__ : Dict =16 A__ : int =12 A__ : Dict =8 A__ : Optional[int] =640 A__ : Tuple =2560 elif "base" not in model_name: raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" ) def lowercase ( UpperCamelCase : Any ): """simple docstring""" if "encoder." 
in name: A__ : List[Any] =name.replace("encoder." , "" ) if "cls_token" in name: A__ : int =name.replace("cls_token" , "videomae.embeddings.cls_token" ) if "decoder_pos_embed" in name: A__ : int =name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" ) if "pos_embed" in name and "decoder" not in name: A__ : Any =name.replace("pos_embed" , "videomae.embeddings.position_embeddings" ) if "patch_embed.proj" in name: A__ : Optional[Any] =name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: A__ : Optional[Any] =name.replace("patch_embed.norm" , "videomae.embeddings.norm" ) if "decoder.blocks" in name: A__ : int =name.replace("decoder.blocks" , "decoder.decoder_layers" ) if "blocks" in name: A__ : Union[str, Any] =name.replace("blocks" , "videomae.encoder.layer" ) if "attn.proj" in name: A__ : Union[str, Any] =name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "bias" not in name: A__ : Any =name.replace("attn" , "attention.self" ) if "attn" in name: A__ : Optional[int] =name.replace("attn" , "attention.attention" ) if "norm1" in name: A__ : Optional[Any] =name.replace("norm1" , "layernorm_before" ) if "norm2" in name: A__ : Any =name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: A__ : int =name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: A__ : List[Any] =name.replace("mlp.fc2" , "output.dense" ) if "decoder_embed" in name: A__ : Tuple =name.replace("decoder_embed" , "decoder.decoder_embed" ) if "decoder_norm" in name: A__ : str =name.replace("decoder_norm" , "decoder.decoder_norm" ) if "decoder_pred" in name: A__ : Optional[int] =name.replace("decoder_pred" , "decoder.decoder_pred" ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: A__ : str =name.replace("norm.weight" , "videomae.layernorm.weight" ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: A__ : Any =name.replace("norm.bias" , 
"videomae.layernorm.bias" ) if "head" in name and "decoder" not in name: A__ : Union[str, Any] =name.replace("head" , "classifier" ) return name def lowercase ( UpperCamelCase : Any , UpperCamelCase : Tuple ): """simple docstring""" for key in orig_state_dict.copy().keys(): A__ : Any =orig_state_dict.pop(UpperCamelCase ) if key.startswith("encoder." ): A__ : Tuple =key.replace("encoder." , "" ) if "qkv" in key: A__ : Optional[Any] =key.split("." ) if key.startswith("decoder.blocks" ): A__ : Optional[Any] =config.decoder_hidden_size A__ : Tuple =int(key_split[2] ) A__ : str ="decoder.decoder_layers." if "weight" in key: A__ : Optional[Any] =val[:dim, :] A__ : int =val[dim : dim * 2, :] A__ : str =val[-dim:, :] else: A__ : Union[str, Any] =config.hidden_size A__ : Any =int(key_split[1] ) A__ : List[Any] ="videomae.encoder.layer." if "weight" in key: A__ : int =val[:dim, :] A__ : Tuple =val[dim : dim * 2, :] A__ : Any =val[-dim:, :] else: A__ : Dict =val return orig_state_dict def lowercase ( ): """simple docstring""" A__ : int =hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" ) A__ : Optional[Any] =np.load(UpperCamelCase ) return list(UpperCamelCase ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): """simple docstring""" A__ : Tuple =get_videomae_config(UpperCamelCase ) if "finetuned" in model_name: A__ : Dict =VideoMAEForVideoClassification(UpperCamelCase ) else: A__ : List[Any] =VideoMAEForPreTraining(UpperCamelCase ) # download original checkpoint, hosted on Google Drive A__ : List[str] ="pytorch_model.bin" gdown.cached_download(UpperCamelCase , UpperCamelCase , quiet=UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase , map_location="cpu" ) if "model" in files: A__ : Dict =files["model"] else: A__ : Optional[int] =files["module"] A__ : Union[str, Any] =convert_state_dict(UpperCamelCase , UpperCamelCase 
) model.load_state_dict(UpperCamelCase ) model.eval() # verify model on basic input A__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) A__ : Dict =prepare_video() A__ : List[Any] =image_processor(UpperCamelCase , return_tensors="pt" ) if "finetuned" not in model_name: A__ : Any =hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) A__ : Any =torch.load(UpperCamelCase ) A__ : List[Any] =model(**UpperCamelCase ) A__ : int =outputs.logits A__ : str =[ "videomae-small-finetuned-kinetics", "videomae-small-finetuned-ssv2", # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", "videomae-huge-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": A__ : Any =torch.Size([1, 400] ) A__ : Union[str, Any] =torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": A__ : Tuple =torch.Size([1, 174] ) A__ : Any =torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": A__ : List[Any] =torch.Size([1, 1408, 1536] ) A__ : Union[str, Any] =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": A__ : str =torch.Size([1, 1408, 1536] ) A__ : Optional[Any] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets 
for this one A__ : Any =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": A__ : Tuple =torch.Size([1, 1408, 1536] ) A__ : Optional[int] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": A__ : str =torch.Size([1, 400] ) A__ : Any =torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": A__ : Union[str, Any] =torch.Size([1, 400] ) A__ : List[Any] =torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": A__ : str =torch.Size([1, 400] ) A__ : Optional[Any] =torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": A__ : Union[str, Any] =torch.Size([1, 400] ) A__ : Optional[Any] =torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": A__ : Optional[int] =torch.Size([1, 1408, 1536] ) A__ : Any =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": A__ : Optional[int] =torch.Size([1, 174] ) A__ : Tuple =torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": A__ : Optional[int] =torch.Size([1, 1408, 1536] ) A__ : Any =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": A__ : Union[str, Any] =torch.Size([1, 174] ) A__ : Any =torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(F'''Model name not supported. 
Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 ) else: print("Logits:" , logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase , atol=1E-4 ) print("Logits ok!" ) # verify loss, if applicable if model_name == "videomae-base-short": A__ : Union[str, Any] =outputs.loss assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-4 ) print("Loss ok!" ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase ) model.save_pretrained(UpperCamelCase ) if push_to_hub: print("Pushing to the hub..." ) model.push_to_hub(UpperCamelCase , organization="nielsr" ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) __A : Union[str, Any] = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
595
"""simple docstring""" def lowercase ( UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any ): """simple docstring""" global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: A__ : int =mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: A__ : str =max( mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , mf_knapsack(i - 1 , UpperCamelCase , UpperCamelCase , j - wt[i - 1] ) + val[i - 1] , ) A__ : str =val return f[i][j] def lowercase ( UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] ): """simple docstring""" A__ : Optional[int] =[[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: A__ : Tuple =max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: A__ : Tuple =dp[i - 1][w_] return dp[n][w_], dp def lowercase ( UpperCamelCase : int , UpperCamelCase : list , UpperCamelCase : list ): """simple docstring""" if not (isinstance(UpperCamelCase , (list, tuple) ) and isinstance(UpperCamelCase , (list, tuple) )): raise ValueError( "Both the weights and values vectors must be either lists or tuples" ) A__ : Optional[int] =len(UpperCamelCase ) if num_items != len(UpperCamelCase ): A__ : Any =( "The number of weights must be the same as the number of values.\n" F'''But got {num_items} weights and {len(UpperCamelCase )} values''' ) raise ValueError(UpperCamelCase ) for i in range(UpperCamelCase ): if not isinstance(wt[i] , UpperCamelCase ): A__ : List[Any] =( "All weights must be integers but got weight of " F'''type {type(wt[i] )} at index {i}''' ) raise TypeError(UpperCamelCase ) A__ , A__ : Union[str, Any] =knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) A__ : set =set() _construct_solution(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) return optimal_val, 
example_optional_set def lowercase ( UpperCamelCase : list , UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : set ): """simple docstring""" # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(UpperCamelCase , UpperCamelCase , i - 1 , UpperCamelCase , UpperCamelCase ) else: optimal_set.add(UpperCamelCase ) _construct_solution(UpperCamelCase , UpperCamelCase , i - 1 , j - wt[i - 1] , UpperCamelCase ) if __name__ == "__main__": __A : Union[str, Any] = [3, 2, 4, 4] __A : List[Any] = [4, 3, 2, 3] __A : Dict = 4 __A : int = 6 __A : List[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] __A , __A : Union[str, Any] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 __A , __A : List[str] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("optimal_value = ", optimal_solution) print("An optimal subset corresponding to the optimal value", optimal_subset)
595
1
"""Convert Swin SimMIM checkpoints from the original repository into the
Hugging Face `SwinForMaskedImageModeling` format."""

import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    """Build a SwinConfig for the SimMIM 192px checkpoints (base or large)."""
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    """Map a key of the original checkpoint to the HF model's naming scheme."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        # decoder weights are kept under their original names
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Rewrite the original state dict in place, splitting fused qkv weights
    into separate query/key/value tensors expected by the HF implementation."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            # attention masks are buffers the HF model recomputes itself
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    """Load the original checkpoint, convert it, smoke-test a forward pass on a
    COCO image, and optionally save / push the converted model."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        # keep the full ModelOutput so .keys() below is valid
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
694
'''simple docstring''' import os def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase =len(grid[0] ) _UpperCamelCase =len(__SCREAMING_SNAKE_CASE ) _UpperCamelCase =0 _UpperCamelCase =0 _UpperCamelCase =0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(__SCREAMING_SNAKE_CASE ): for j in range(n_rows - 3 ): _UpperCamelCase =grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] _UpperCamelCase =grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: _UpperCamelCase =( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: _UpperCamelCase =( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) _UpperCamelCase =max( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if max_product > largest: _UpperCamelCase =max_product return largest def _a (): """simple docstring""" _UpperCamelCase =[] with open(os.path.dirname(__SCREAMING_SNAKE_CASE ) + '''/grid.txt''' ) as file: for line in file: grid.append(line.strip('''\n''' ).split(''' ''' ) ) _UpperCamelCase =[[int(__SCREAMING_SNAKE_CASE ) for i in grid[j]] for j in range(len(__SCREAMING_SNAKE_CASE ) )] return largest_product(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(solution())
404
0
"""Check (and optionally regenerate) the dummy objects that diffusers exposes
when an optional backend such as torch is not installed.

All paths are set with the intent you should run this script from the root of
the repo with the command: python utils/check_dummies.py
"""

import argparse
import os
import re

PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Return the backend name(s) mentioned in an ``is_xxx_available()`` line,
    joined with ``_and_``, or None if the line mentions no backend."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Parse the main __init__ and map each backend to the objects imported
    under its ``is_xxx_available()`` guard."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects
        # associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    # continuation line of a multi-line import
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Render the dummy for one object: constants are upper-case, functions
    lower-case, everything else is a class."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Build the full text of every dummy_xxx_objects.py file, keyed by backend."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()

    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Compare generated dummy files to the ones on disk; rewrite them when
    ``overwrite`` is True, otherwise raise on any mismatch."""
    dummy_files = create_dummy_files()
    # For special correspondence backend name to shortcut as used in
    # utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
334
from __future__ import annotations from math import ceil, floor, sqrt def UpperCAmelCase ( UpperCamelCase__ = 2_00_00_00 ) -> int: '''simple docstring''' __lowerCAmelCase = [0] __lowerCAmelCase = 42 for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target __lowerCAmelCase = 0 # the area corresponding to the grid that gives the product closest to target __lowerCAmelCase = 0 # an estimate of b, using the quadratic formula __lowerCAmelCase = 42 # the largest integer less than b_estimate __lowerCAmelCase = 42 # the largest integer less than b_estimate __lowerCAmelCase = 42 # the triangle number corresponding to b_floor __lowerCAmelCase = 42 # the triangle number corresponding to b_ceil __lowerCAmelCase = 42 for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): __lowerCAmelCase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 __lowerCAmelCase = floor(UpperCamelCase__ ) __lowerCAmelCase = ceil(UpperCamelCase__ ) __lowerCAmelCase = triangle_numbers[b_floor] __lowerCAmelCase = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): __lowerCAmelCase = triangle_b_first_guess * triangle_a __lowerCAmelCase = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): __lowerCAmelCase = triangle_b_second_guess * triangle_a __lowerCAmelCase = idx_a * b_ceil return area if __name__ == "__main__": print(f"""{solution() = }""")
334
1
"""Tests for the PNDM scheduler (PRK warm-up steps followed by PLMS steps)."""
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class UpperCAmelCase__(SchedulerCommonTest):
    # NOTE(review): the original declared ``class UpperCAmelCase__(A)`` with
    # ``A`` undefined (NameError at import time); SchedulerCommonTest is
    # imported above and is clearly the intended base class — restored here.
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default PNDM config, with overrides applied from kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify step_prk/step_plms outputs match."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/reload round-trips are covered by check_over_configs above
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Like check_over_configs, but varying forward kwargs instead of config."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run 10 inference steps (PRK warm-up then PLMS) and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's
        # with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only
            # need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        # NOTE(review): obfuscation lost the boolean; True matches the 230.0399
        # reference values from upstream — confirm against diffusers.
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
44
import fire from utils import calculate_rouge, save_json def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: Optional[int] , lowerCAmelCase: List[Any]=None , **lowerCAmelCase: Tuple ) -> Union[str, Any]: _UpperCAmelCase : Tuple = [x.strip() for x in open(lowerCAmelCase ).readlines()] _UpperCAmelCase : List[Any] = [x.strip() for x in open(lowerCAmelCase ).readlines()][: len(lowerCAmelCase )] _UpperCAmelCase : Tuple = calculate_rouge(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) if save_path is not None: save_json(lowerCAmelCase , lowerCAmelCase , indent=lowerCAmelCase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
300
0
from collections import UserDict
from typing import Union

import numpy as np
import requests

from ..utils import (
    add_end_docstrings,
    logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Zero-shot audio classification: score an audio clip against arbitrary
    candidate labels using an audio-text model (e.g. CLAP).

    NOTE(review): the original inherited from the undefined name ``_a`` and
    gave all four pipeline hooks the same obfuscated name; the base class and
    the ``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess``
    contract required by ``Pipeline`` are restored here.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Classify the audio(s) given as inputs; see ``preprocess`` for the
        accepted audio formats (URL, local path, raw bytes, or 1-D ndarray)."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's
                # impossible to use a local file like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        # highest score first
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
714
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers a_ : str = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
148
0
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


# Optional backends: only imported when actually available.
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


# NOTE(review): throughout this module the *targets* of many assignments look
# machine-mangled (every one is `UpperCamelCase_: ...`), while later statements
# read names such as `model`, `is_compiled`, `options`, `forward`,
# `original_forward`, `compiled_model`, `kwargs`, `obj`, `source`,
# `destination` and `port` that are never bound.  As written the code raises
# NameError at runtime; the comments below describe the apparent intent —
# confirm against the upstream accelerate sources before relying on them.
# `Union` is also used in annotations without being imported.


def snake_case (UpperCAmelCase__ ) -> Union[str, Any]:
    # True when the argument is a torch.compile()-wrapped module.
    # torch._dynamo only exists on torch >= 2.0, hence the version guard.
    # NOTE(review): the hasattr check presumably targets `torch`, not the
    # argument — TODO confirm upstream.
    if is_torch_version('<' , '2.0.0' ) or not hasattr(UpperCAmelCase__ , '_dynamo' ):
        return False
    return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )


def snake_case (UpperCAmelCase__ , UpperCAmelCase__ = True ) -> Any:
    # Unwrap a model from DDP/DataParallel (and DeepSpeed / torch.compile)
    # containers; the second argument presumably controls whether the mixed-
    # precision forward wrapper is kept — TODO confirm.
    UpperCamelCase_: Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    UpperCamelCase_: int = is_compiled_module(UpperCAmelCase__ )
    if is_compiled:
        # Remember the compiled wrapper and unwrap to the original module.
        UpperCamelCase_: List[str] = model
        UpperCamelCase_: Dict = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    # Peel off nested parallel wrappers one layer at a time.
    while isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
        UpperCamelCase_: Dict = model.module
    if not keep_fpaa_wrapper:
        # Restore the pre-wrapping forward saved under '_original_forward'.
        UpperCamelCase_: int = getattr(UpperCAmelCase__ , 'forward' )
        UpperCamelCase_: List[str] = model.__dict__.pop('_original_forward' , UpperCAmelCase__ )
        if original_forward is not None:
            # Walk the functools.wraps chain back to the original function.
            while hasattr(UpperCAmelCase__ , '__wrapped__' ):
                UpperCamelCase_: Any = forward.__wrapped__
                if forward == original_forward:
                    break
            UpperCamelCase_: Optional[int] = forward
    if getattr(UpperCAmelCase__ , '_converted_to_transformer_engine' , UpperCAmelCase__ ):
        # Undo the TransformerEngine layer conversion.
        convert_model(UpperCAmelCase__ , to_transformer_engine=UpperCAmelCase__ )
    if is_compiled:
        # Re-attach the unwrapped module to the compiled wrapper and return it.
        UpperCamelCase_: Union[str, Any] = model
        UpperCamelCase_: Tuple = compiled_model
    return model


def snake_case () -> List[str]:
    # Barrier: block until every process in the partial state reaches here.
    PartialState().wait_for_everyone()


def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
    # Save an object to disk exactly once: via xm.save on TPU, otherwise only
    # from the local main process.
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(UpperCAmelCase__ , UpperCAmelCase__ )
    elif PartialState().local_process_index == 0:
        torch.save(UpperCAmelCase__ , UpperCAmelCase__ )


@contextmanager
def snake_case (**UpperCAmelCase__ ) -> Any:
    # Temporarily patch environment variables for the duration of the `with`
    # block.  NOTE(review): the set step is mangled — the str(value) result is
    # bound to a local, so os.environ[key.upper()] is never actually set even
    # though the cleanup below deletes it; presumably it should be
    # `os.environ[key.upper()] = str(value)` — TODO confirm upstream.
    for key, value in kwargs.items():
        UpperCamelCase_: int = str(UpperCAmelCase__ )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def snake_case (UpperCAmelCase__ ) -> str:
    # Best-effort human-readable name for an object: qualname, then name,
    # then its class's, finally str().
    if not hasattr(UpperCAmelCase__ , '__qualname__' ) and not hasattr(UpperCAmelCase__ , '__name__' ):
        UpperCamelCase_: List[Any] = getattr(UpperCAmelCase__ , '__class__' , UpperCAmelCase__ )
    if hasattr(UpperCAmelCase__ , '__qualname__' ):
        return obj.__qualname__
    if hasattr(UpperCAmelCase__ , '__name__' ):
        return obj.__name__
    return str(UpperCAmelCase__ )


def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
    # Recursively merge `source` into `destination` (nested dicts merged
    # in place, scalars overwritten); returns `destination`.
    for key, value in source.items():
        if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
            UpperCamelCase_: Any = destination.setdefault(UpperCAmelCase__ , {} )
            merge_dicts(UpperCAmelCase__ , UpperCAmelCase__ )
        else:
            UpperCamelCase_: str = value
    return destination


def snake_case (UpperCAmelCase__ = None ) -> bool:
    # True when something is already listening on localhost:port
    # (default 29500, the conventional torch.distributed master port).
    if port is None:
        UpperCamelCase_: List[str] = 2_9_5_0_0
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
57
"""Lazy-import definition for the DistilBERT model family.

BUG FIX: the import-structure dict and its optional-backend extensions were
bound to throwaway names (`A_`), while `_LazyModule` below was called with an
undefined `_import_structure` — importing the package raised NameError and the
optional entries were discarded.  Restored the canonical transformers
boilerplate: each available backend extends `_import_structure`, and the lazy
module is installed into `sys.modules`.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule -> public names it provides; heavy backends are only imported on
# first attribute access via _LazyModule.
_import_structure = {
    'configuration_distilbert': [
        'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'DistilBertConfig',
        'DistilBertOnnxConfig',
    ],
    'tokenization_distilbert': ['DistilBertTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_distilbert_fast'] = ['DistilBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_distilbert'] = [
        'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DistilBertForMaskedLM',
        'DistilBertForMultipleChoice',
        'DistilBertForQuestionAnswering',
        'DistilBertForSequenceClassification',
        'DistilBertForTokenClassification',
        'DistilBertModel',
        'DistilBertPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_distilbert'] = [
        'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDistilBertForMaskedLM',
        'TFDistilBertForMultipleChoice',
        'TFDistilBertForQuestionAnswering',
        'TFDistilBertForSequenceClassification',
        'TFDistilBertForTokenClassification',
        'TFDistilBertMainLayer',
        'TFDistilBertModel',
        'TFDistilBertPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_distilbert'] = [
        'FlaxDistilBertForMaskedLM',
        'FlaxDistilBertForMultipleChoice',
        'FlaxDistilBertForQuestionAnswering',
        'FlaxDistilBertForSequenceClassification',
        'FlaxDistilBertForTokenClassification',
        'FlaxDistilBertModel',
        'FlaxDistilBertPreTrainedModel',
    ]


if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors _import_structure.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # the real submodule imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
57
1
'''simple docstring'''
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


# NOTE(review): this benchmark script looks machine-mangled.  Every constant
# below is bound to `A_` and every local to `lowerCAmelCase_`, while later
# code reads SPEED_TEST_N_EXAMPLES, SMALL_TEST, RESULTS_BASEPATH,
# RESULTS_FILENAME, RESULTS_FILE_PATH, `dataset`, `RESULTS` and
# `benchmark_iterating` — none of which is ever defined.  Several defs also
# repeat the parameter name `__snake_case`, which is a SyntaxError, and the
# tuple target with an annotation on the os.path.split line is invalid
# syntax.  The comments describe the apparent intent; confirm against the
# upstream datasets benchmark before fixing.
A_ : Any = 5_00_00
A_ : List[Any] = 50_00
A_ , A_ : Union[str, Any] = os.path.split(__file__)
A_ : Union[str, Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))


@get_duration
def snake_case_ ( __snake_case : datasets.Dataset , __snake_case : str) -> int:
    # Time reading `length` examples one by one.
    for i in range(__snake_case):
        lowerCAmelCase_ = dataset[i]


@get_duration
def snake_case_ ( __snake_case : datasets.Dataset , __snake_case : Tuple , __snake_case : Union[str, Any]) -> Optional[Any]:
    # Time reading `length` examples in slices of `batch_size`.
    for i in range(0 , len(__snake_case) , __snake_case):
        lowerCAmelCase_ = dataset[i : i + batch_size]


@get_duration
def snake_case_ ( __snake_case : datasets.Dataset , __snake_case : Optional[int] , __snake_case : Optional[int]) -> Any:
    # Time single-example reads while the dataset is formatted as `type`
    # (numpy / pandas / torch / tensorflow).
    with dataset.formatted_as(type=__snake_case):
        for i in range(__snake_case):
            lowerCAmelCase_ = dataset[i]


@get_duration
def snake_case_ ( __snake_case : datasets.Dataset , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Optional[int]) -> int:
    # Time batched reads while the dataset is formatted as `type`.
    with dataset.formatted_as(type=__snake_case):
        for i in range(0 , __snake_case , __snake_case):
            lowerCAmelCase_ = dataset[i : i + batch_size]


def snake_case_ ( ) -> int:
    # Driver: generate a synthetic dataset, run each (function, kwargs)
    # benchmark before and after shuffling, and dump the timings as JSON.
    lowerCAmelCase_ = {'''num examples''': SPEED_TEST_N_EXAMPLES}
    # Benchmarks on the freshly generated (contiguous) dataset.
    lowerCAmelCase_ = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
        (read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]
    # Smaller benchmark set re-run after dataset.shuffle() (non-contiguous
    # reads are slower, so fewer variants are timed).
    lowerCAmelCase_ = [
        (read, {'''length''': SMALL_TEST}),
        (read, {'''length''': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
        (read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
        (read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
        (read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''')
        lowerCAmelCase_ = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')})
        lowerCAmelCase_ = generate_example_dataset(
            os.path.join(__snake_case , '''dataset.arrow''') , __snake_case , num_examples=__snake_case , seq_shapes={'''list''': (100,)} , )
        print('''first set of iterations''')
        for func, kwargs in functions:
            print(func.__name__ , str(__snake_case))
            # NOTE(review): the timing result is bound to a throwaway local;
            # presumably it should be stored into the RESULTS dict.
            lowerCAmelCase_ = func(__snake_case , **__snake_case)
        print('''shuffling dataset''')
        lowerCAmelCase_ = dataset.shuffle()
        print('''Second set of iterations (after shuffling''')
        for func, kwargs in functions_shuffled:
            print('''shuffled ''' , func.__name__ , str(__snake_case))
            lowerCAmelCase_ = func(
                __snake_case , **__snake_case)
    with open(__snake_case , '''wb''') as f:
        f.write(json.dumps(__snake_case).encode('''utf-8'''))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
606
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """
    Project Euler problem 1: return the sum of all natural numbers below
    ``n`` that are multiples of 3 or 5.

    >>> solution(10)
    23
    >>> solution(1000)
    233168
    """
    # BUG FIX: the original loop bound its counter/accumulator to throwaway
    # locals (NameError at runtime) and carried an unreachable
    # `elif a % 15 == 0` branch — any multiple of 15 already matches
    # `a % 3 == 0`.  A single filtered sum expresses the intent directly.
    return sum(a for a in range(n) if a % 3 == 0 or a % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
606
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


# When sentencepiece is missing the slow tokenizer class is unavailable; the
# fast tokenizer can still load, but cannot be converted back.
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    _lowercase = None

_lowercase = logging.get_logger(__name__)

# NOTE(review): the module-level constant targets look machine-mangled — all
# are bound to `_lowercase`, while the class body below reads
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES (undefined here), the base class
# `A_` is undefined, and `logger` / `RemBertTokenizer` references follow the
# same pattern.  Several method signatures also repeat the parameter name
# `__magic_name__`, which is a SyntaxError.  Confirm against the upstream
# transformers RemBERT tokenizer before fixing.
_lowercase = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = {
    '''vocab_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
    },
    '''tokenizer_file''': {
        '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
    },
}
_lowercase = {
    '''google/rembert''': 2_56,
}
_lowercase = '''▁'''


class __A ( A_ ):
    """Fast (tokenizers-backed) tokenizer for RemBERT, mirroring the
    sentencepiece-based slow tokenizer."""

    # Class-level tokenizer metadata consumed by the base class.
    UpperCamelCase :Union[str, Any] = VOCAB_FILES_NAMES
    UpperCamelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase :str = RemBertTokenizer

    def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__=True , __magic_name__=False , __magic_name__="[CLS]" , __magic_name__="[SEP]" , __magic_name__="<unk>" , __magic_name__="[SEP]" , __magic_name__="<pad>" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , **__magic_name__ , ):
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase__ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
        super().__init__(
            __magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , )
        # NOTE(review): the following assignments lost their `self.` attribute
        # targets to the mangling (presumably self.do_lower_case,
        # self.remove_space, self.keep_accents, self.vocab_file and
        # self.can_save_slow_tokenizer) — TODO confirm upstream.
        lowerCamelCase__ : List[Any] = do_lower_case
        lowerCamelCase__ : Optional[Any] = remove_space
        lowerCamelCase__ : Dict = keep_accents
        lowerCamelCase__ : Optional[int] = vocab_file
        lowerCamelCase__ : Dict = False if not self.vocab_file else True

    def _snake_case (self , __magic_name__ , __magic_name__ = None ):
        # Build model input from one or two sequences:
        # [CLS] A [SEP]  or  [CLS] A [SEP] B [SEP].
        lowerCamelCase__ : Optional[int] = [self.sep_token_id]
        lowerCamelCase__ : List[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def _snake_case (self , __magic_name__ , __magic_name__ = None , __magic_name__ = False ):
        # Return a mask with 1 at special-token positions, 0 elsewhere.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
        return [1] + ([0] * len(__magic_name__ )) + [1]

    def _snake_case (self , __magic_name__ , __magic_name__ = None ):
        # Token-type ids: 0 for the first segment (incl. its specials),
        # 1 for the second.
        lowerCamelCase__ : Union[str, Any] = [self.sep_token_id]
        lowerCamelCase__ : Optional[int] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _snake_case (self , __magic_name__
        , __magic_name__ = None ):
        # Copy the sentencepiece model into `save_directory`.
        # NOTE(review): on the error path this returns None rather than the
        # usual (out_vocab_file,) tuple — callers that unpack will fail.
        if not os.path.isdir(__magic_name__ ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(__magic_name__ ) )
            return
        lowerCamelCase__ : Optional[Any] = os.path.join(
            __magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
            copyfile(self.vocab_file , __magic_name__ )
        return (out_vocab_file,)
157
"""Introsort: quicksort with a median-of-three pivot that falls back to
heapsort past a recursion-depth limit and to insertion sort on small slices.

BUG FIX: every function in this module was defined under the same mangled
name `_A` (each def shadowing the previous one), while the bodies and the
__main__ block call `insertion_sort`, `heapify`, `heap_sort`, `median_of_a`,
`partition`, `intro_sort` and `sort` — none of which existed.  The swap
statements also used annotated tuple targets, which is a SyntaxError.  The
names below are restored from the internal call sites.
"""
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort; return array.

    ``end == 0`` means "to the end of the list".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger predecessors right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:
    """Sift ``array[index]`` down so the subtree rooted there is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    """Sort ``array`` in place with heapsort; return it."""
    n = len(array)
    # Build the max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the max to the end and restore the heap property.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array


def median_of_a(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three values at the given indices."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around the pivot *value*;
    return the split index ``p`` (elements in [p, high) are >= pivot)."""
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    """Sort ``array`` in place with introsort; return it.

    The depth limit 2*ceil(log2(n)) is the classic introsort bound; slices
    of at most 16 elements are finished by insertion sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Quicksort is degenerating; guarantee O(n log n) with heapsort.
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by a comma : ''').strip()
    unsorted = [float(item) for item in user_input.split(''',''')]
    print(sort(unsorted))
157
1
"""Lazy-import definition for the RoCBert model.

BUG FIXES:
- the import-structure dict was bound to a throwaway name while `_LazyModule`
  was called with the undefined `_import_structure` (NameError on import),
  and the torch branch's model list was likewise discarded;
- in the TYPE_CHECKING section, the tokenizers guard *raised*
  OptionalDependencyNotAvailable in its `else:` branch — i.e. exactly when
  the dependency IS available.  RoCBert ships no fast tokenizer, so that
  branch is a no-op.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Submodule -> public names it provides; consumed by _LazyModule so heavy
# backends are only imported on first attribute access.
_import_structure = {
    'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
    'tokenization_roc_bert': ['RoCBertTokenizer'],
}

# RoCBert has no fast (tokenizers-backed) tokenizer; the guard is kept only
# to match the standard model-__init__ template.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roc_bert'] = [
        'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RoCBertForCausalLM',
        'RoCBertForMaskedLM',
        'RoCBertForMultipleChoice',
        'RoCBertForPreTraining',
        'RoCBertForQuestionAnswering',
        'RoCBertForSequenceClassification',
        'RoCBertForTokenClassification',
        'RoCBertLayer',
        'RoCBertModel',
        'RoCBertPreTrainedModel',
        'load_tf_weights_in_roc_bert',
    ]


if TYPE_CHECKING:
    # Eager imports for static type checkers only; mirrors _import_structure.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # No fast tokenizer exists for RoCBert — nothing to import here.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers
    # the real submodule imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
716
"""Convert a PyTorch-Lightning Longformer question-answering checkpoint into
a standalone HuggingFace `LongformerForQuestionAnswering` model.

BUG FIX: the original had been name-mangled — the Lightning wrapper's
`self.model` / `self.num_labels` / `self.qa_outputs` assignments, the
conversion function's locals (`lightning_model`, `ckpt`,
`longformer_for_qa`, ...) and even the class/function names themselves were
bound to throwaway identifiers, while every later statement read the real
names (e.g. `lightning_model.load_state_dict`, `ckpt["state_dict"]`,
`convert_longformer_qa_checkpoint_to_pytorch(...)`), so the script raised
NameError immediately.  The names are restored from those read sites.
"""
import argparse

import pytorch_lightning as pl
import torch
from torch import nn

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the layout the QA checkpoint was
    trained with: a Longformer backbone plus a 2-label span head."""

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def forward(self):
        # Implemented only because Lightning requires a forward(); the
        # converter never calls it.
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """Load the Lightning checkpoint, transfer its weights into a fresh
    `LongformerForQuestionAnswering`, and save it under
    `pytorch_dump_folder_path`."""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("""cpu"""))
    lightning_model.load_state_dict(ckpt["""state_dict"""])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--longformer_model',
        default=None,
        type=str,
        required=True,
        help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
    )
    parser.add_argument(
        '--longformer_question_answering_ckpt_path',
        default=None,
        type=str,
        required=True,
        help='Path the official PyTorch Lightning Checkpoint.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
240
0
'''simple docstring'''
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever


_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)


# NOTE(review): this class looks machine-mangled.  The base class
# `lowerCAmelCase__` is undefined (presumably RagRetriever, imported above);
# method parameters are all named `_lowerCamelCase` (duplicates -> SyntaxError)
# while bodies read `UpperCAmelCase__`; and many assignment targets were lost
# (e.g. `self.process_group`, the GLOO/port environment-variable sets in
# init_retrieval).  `self._main_retrieve` and `self._chunk_tensor` are not
# defined here — presumably inherited.  Confirm every flagged line against the
# upstream transformers RAG retriever before fixing.
class __lowercase ( lowerCAmelCase__ ):
    '''simple docstring'''

    def __init__(self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ) -> int:
        '''simple docstring'''
        # Forward config/tokenizers/index to the base retriever; defer index
        # initialisation to init_retrieval().
        super().__init__(
            UpperCAmelCase__ ,question_encoder_tokenizer=UpperCAmelCase__ ,generator_tokenizer=UpperCAmelCase__ ,index=UpperCAmelCase__ ,init_retrieval=UpperCAmelCase__ ,)
        # NOTE(review): target lost — presumably `self.process_group = None`.
        __lowercase = None

    def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
        '''simple docstring'''
        # Set up a dedicated gloo process group for retrieval and initialise
        # the index on the main worker only.
        logger.info('''initializing retrieval''' )

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''' )
            # needs to be set manually
            # NOTE(review): the ifname and port values are computed but their
            # os.environ targets were mangled away — as written nothing is set.
            __lowercase = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            __lowercase = str(distributed_port + 1 )
            __lowercase = dist.new_group(ranks=UpperCAmelCase__ ,backend='''gloo''' )

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''' )
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )

    def _UpperCAmelCase (self ) -> Optional[int]:
        '''simple docstring'''
        # True only on rank 0 of the retrieval process group.
        return dist.get_rank(group=self.process_group ) == 0

    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=torch.floataa ) -> int:
        '''simple docstring'''
        # Receive this rank's shard of a tensor scattered from rank 0.
        __lowercase = torch.empty(UpperCAmelCase__ ,dtype=UpperCAmelCase__ )
        dist.scatter(UpperCAmelCase__ ,src=0 ,scatter_list=UpperCAmelCase__ ,group=self.process_group )
        return target_tensor

    def
        _UpperCAmelCase (self ) -> Any:
        '''simple docstring'''
        # Pick a network interface name for gloo; heuristically the first
        # interface starting with 'e' (eth*/en*).
        __lowercase = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        __lowercase = next((addr for addr in addrs if addr.startswith('''e''' )) ,UpperCAmelCase__ )
        return ifname

    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
        '''simple docstring'''
        # Distributed retrieve: gather all ranks' question states on the main
        # worker, run the index query there, then scatter the shards back.
        if not dist.is_initialized():
            # Single-process fallback: query the index directly.
            __lowercase , __lowercase = self._main_retrieve(UpperCAmelCase__ ,UpperCAmelCase__ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCAmelCase__ )

        # distributed training
        __lowercase = dist.get_world_size(group=self.process_group )

        # gather logic
        __lowercase = None
        if self._is_main():
            __lowercase = [torch.empty(question_hidden_states.shape ,dtype=torch.floataa ) for _ in range(UpperCAmelCase__ )]
        dist.gather(torch.tensor(UpperCAmelCase__ ) ,dst=0 ,gather_list=UpperCAmelCase__ ,group=self.process_group )

        # scatter logic
        __lowercase = question_hidden_states.shape[0]
        __lowercase = []
        __lowercase = []
        if self._is_main():
            assert len(UpperCAmelCase__ ) == world_size
            __lowercase , __lowercase = self._main_retrieve(torch.cat(UpperCAmelCase__ ).numpy() ,UpperCAmelCase__ )
            __lowercase , __lowercase = torch.tensor(UpperCAmelCase__ ), torch.tensor(UpperCAmelCase__ )
            __lowercase = self._chunk_tensor(UpperCAmelCase__ ,UpperCAmelCase__ )
            __lowercase = self._chunk_tensor(UpperCAmelCase__ ,UpperCAmelCase__ )
        __lowercase = self._scattered(UpperCAmelCase__ ,[n_queries, n_docs] ,target_type=torch.intaa )
        __lowercase = self._scattered(UpperCAmelCase__ ,[n_queries, n_docs, question_hidden_states.shape[1]] )

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(UpperCAmelCase__ )
502
'''simple docstring'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


# NOTE(review): this script looks machine-mangled.  Every function below is
# defined under the same name `a` (each def shadowing the previous one),
# while call sites reference `_is_chinese_char`, `is_chinese`,
# `get_chinese_word`, `add_sub_symbol`, `prepare_ref` and `main` — none of
# which exists.  Locals are likewise bound to `a_` but read back under their
# real names (`cp`, `word_set`, `bert_word`, `start`, `res`, ...), and the
# __main__ block binds the parser/args to throwaway names.  The comments
# describe the apparent intent; confirm against the upstream
# prepare_chinese_ref script before fixing.
def a ( _UpperCAmelCase ) -> int:
    """simple docstring"""
    # True when the code point falls in a CJK Unified Ideographs block.
    if (
        (cp >= 0X4_e00 and cp <= 0X9_fff)
        or (cp >= 0X3_400 and cp <= 0X4_dbf)  #
        or (cp >= 0X20_000 and cp <= 0X2a_6df)  #
        or (cp >= 0X2a_700 and cp <= 0X2b_73f)  #
        or (cp >= 0X2b_740 and cp <= 0X2b_81f)  #
        or (cp >= 0X2b_820 and cp <= 0X2c_eaf)  #
        or (cp >= 0Xf_900 and cp <= 0Xf_aff)
        or (cp >= 0X2f_800 and cp <= 0X2f_a1f)  #
    ):  #
        return True
    return False


def a ( _UpperCAmelCase ) -> Union[str, Any]:
    """simple docstring"""
    # 1 when every character of the word is a CJK ideograph, else 0.
    for char in word:
        a_ = ord(_UpperCAmelCase )
        if not _is_chinese_char(_UpperCAmelCase ):
            return 0
    return 1


def a ( _UpperCAmelCase ) -> Tuple:
    """simple docstring"""
    # Collect the multi-character all-Chinese tokens from a token list.
    a_ = set()
    for token in tokens:
        a_ = len(_UpperCAmelCase ) > 1 and is_chinese(_UpperCAmelCase )
        if chinese_word:
            word_set.add(_UpperCAmelCase )
    a_ = list(_UpperCAmelCase )
    return word_list


def a ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
    """simple docstring"""
    # Mark BERT sub-tokens that continue an LTP-segmented Chinese word with
    # the '##' whole-word-masking prefix.
    if not chinese_word_set:
        return bert_tokens
    a_ = max([len(_UpperCAmelCase ) for w in chinese_word_set] )
    a_ = bert_tokens
    a_ , a_ = 0, len(_UpperCAmelCase )
    while start < end:
        a_ = True
        if is_chinese(bert_word[start] ):
            # Try the longest window first so the longest dictionary word wins.
            a_ = min(end - start , _UpperCAmelCase )
            for i in range(_UpperCAmelCase , 1 , -1 ):
                a_ = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        a_ = '##' + bert_word[j]
                    a_ = start + i
                    a_ = False
                    break
        if single_word:
            start += 1
    return bert_word


def a ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
    """simple docstring"""
    # Build whole-word-masking reference positions: segment with LTP in
    # batches of 100 lines, tokenize with BERT, and record the indices of
    # '##'-continuation sub-tokens that are single Chinese characters.
    a_ = []
    for i in range(0 , len(_UpperCAmelCase ) , 1_0_0 ):
        a_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=['cws'] ).cws
        a_ = [get_chinese_word(_UpperCAmelCase ) for r in res]
        ltp_res.extend(_UpperCAmelCase )
    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )

    a_ = []
    for i in range(0 ,
        len(_UpperCAmelCase ) , 1_0_0 ):
        a_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=5_1_2 )
        bert_res.extend(res['input_ids'] )
    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )

    a_ = []
    for input_ids, chinese_word in zip(_UpperCAmelCase , _UpperCAmelCase ):
        a_ = []
        for id in input_ids:
            a_ = bert_tokenizer._convert_id_to_token(_UpperCAmelCase )
            input_tokens.append(_UpperCAmelCase )
        a_ = add_sub_symbol(_UpperCAmelCase , _UpperCAmelCase )
        a_ = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(_UpperCAmelCase ):
            if token[:2] == "##":
                a_ = token[2:]
                # save chinese tokens' pos
                if len(_UpperCAmelCase ) == 1 and _is_chinese_char(ord(_UpperCAmelCase ) ):
                    ref_id.append(_UpperCAmelCase )
        ref_ids.append(_UpperCAmelCase )

    assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
    return ref_ids


def a ( _UpperCAmelCase ) -> Optional[Any]:
    """simple docstring"""
    # Driver: read the corpus, compute the references, write one JSON list
    # per line to the save path.
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        a_ = f.readlines()
    a_ = [line.strip() for line in data if len(_UpperCAmelCase ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    a_ = LTP(args.ltp )  # faster in GPU device
    a_ = BertTokenizer.from_pretrained(args.bert )
    a_ = prepare_ref(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        a_ = [json.dumps(_UpperCAmelCase ) + '\n' for ref in ref_ids]
        f.writelines(_UpperCAmelCase )


if __name__ == "__main__":
    # NOTE(review): `parser`, `args` and `main` are read below but the
    # corresponding assignments were bound to `__lowerCAmelCase`.
    __lowerCAmelCase = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    __lowerCAmelCase = parser.parse_args()
    main(args)
697
0
"""Binary-tree traversals: depth-first (pre/in/post-order), breadth-first,
per-level and zigzag.

BUG FIX: the module had been name-mangled — the dataclass fields and every
function were bound to throwaway identifiers (all defs were `_snake_case`,
shadowing each other), while bodies and `main()` read `Node`, `make_tree`,
`preorder`, `inorder`, `postorder`, `height`, `level_order`,
`get_nodes_from_left_to_right`, `get_nodes_from_right_to_left`, `zigzag`,
`tree`, `flag`, `height_tree`, ... none of which existed; `make_tree` also
created five nodes without linking or returning them.  Names are restored
from the read sites.
"""
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """A binary-tree node: an int payload and optional children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    """Build the fixed demo tree 1(2(4, 5), 3) used by main()."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (0 for empty tree)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[Any]:
    """Breadth-first traversal of the payloads."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    """Payloads of the nodes at `level` (1-based), left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    """Payloads of the nodes at `level` (1-based), right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[list[Any]]:
    """Per-level lists alternating direction, starting left-to-right."""
    if root is None:
        return []
    output: list[list[Any]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    """Run every traversal on the demo tree and print the results."""
    root = make_tree()
    print(f'In-order Traversal: {inorder(root)}')
    print(f'Pre-order Traversal: {preorder(root)}')
    print(f'Post-order Traversal: {postorder(root)}', '\n')
    print(f'Height of Tree: {height(root)}', '\n')
    print('Complete Level Order Traversal: ')
    print(level_order(root), '\n')
    print('Level-wise order Traversal: ')
    for level in range(1, height(root) + 1):
        print(f'Level {level}:', get_nodes_from_left_to_right(root, level=level))
    print('\nZigZag order Traversal: ')
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
706
def _snake_case( SCREAMING_SNAKE_CASE__ : int = 10 , SCREAMING_SNAKE_CASE__ : int = 22 ) -> int: '''simple docstring''' A__ = range(1 , SCREAMING_SNAKE_CASE__ ) A__ = range(1 , SCREAMING_SNAKE_CASE__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f"""{solution(10, 22) = }""")
586
0
"""Divide-and-conquer closest pair of points in the plane, O(n log n)."""


def euclidean_distance_sqr(point1, point2) -> float:
    """Squared Euclidean distance between two 2-D points (avoids sqrt in loops)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return the points sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force minimum squared distance over the first `points_counts` points."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Minimum squared distance within the mid-strip.

    Each point only needs comparison with at most the 6 preceding strip points
    (classic closest-pair packing argument).
    """
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Recursive helper: minimum squared distance among `points_counts` points."""
    # base case: brute force small instances
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion: split at the median x, solve both halves
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # collect points within the current best distance of the dividing line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    """Return the (non-squared) distance of the closest pair among `points`."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts
        )
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
435
'''simple docstring''' from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
435
1
"""SentencePiece-based tokenizer for ALBERT."""

import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    """ALBERT tokenizer backed by a SentencePiece model (``spiece.model``).

    Optionally lowercases, collapses whitespace and strips accents before
    tokenizing; digit-comma pieces such as ``"9,"`` are re-split so the comma
    becomes its own piece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is a C++ object and cannot be pickled;
        # it is reloaded from `vocab_file` in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs: str) -> str:
        """Normalize raw text (whitespace, quotes, accents, case) before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            # Re-split pieces like "9," so the trailing comma is its own piece.
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens) -> str:
        """Join pieces back into a string, decoding around added special tokens."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add ALBERT special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] of the second sequence."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is gone (e.g. loaded from serialized proto):
            # write the serialized model bytes instead of copying.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
114
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase : Optional[int] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys lowercase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
114
1
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class snake_case : '''simple docstring''' def __init__( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Tuple=10 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : int=32 * 4 , lowerCAmelCase_ : Optional[Any]=32 * 6 , lowerCAmelCase_ : Dict=4 , lowerCAmelCase_ : Tuple=32 , ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_auxiliary_loss SCREAMING_SNAKE_CASE_ = num_queries SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = min_size SCREAMING_SNAKE_CASE_ = max_size SCREAMING_SNAKE_CASE_ = num_labels SCREAMING_SNAKE_CASE_ = mask_feature_size def _lowercase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase_ ) > 0.5 
).float() SCREAMING_SNAKE_CASE_ = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase_ ) > 0.5).long() SCREAMING_SNAKE_CASE_ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _lowercase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _lowercase ( self : Tuple ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def _lowercase ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ = output.encoder_hidden_states SCREAMING_SNAKE_CASE_ = output.pixel_decoder_hidden_states SCREAMING_SNAKE_CASE_ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCAmelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowerCAmelCase_ ) , config.decoder_config.decoder_layers ) def _lowercase ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any]=False ) -> Dict: """simple docstring""" with torch.no_grad(): SCREAMING_SNAKE_CASE_ = MaskFormerModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE_ = model(pixel_values=lowerCAmelCase_ , 
pixel_mask=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowerCAmelCase_ , lowerCAmelCase_ ) def _lowercase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = MaskFormerForInstanceSegmentation(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() def comm_check_on_output(lowerCAmelCase_ : List[str] ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(pixel_values=lowerCAmelCase_ , pixel_mask=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ ) comm_check_on_output(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = model( pixel_values=lowerCAmelCase_ , 
pixel_mask=lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ) comm_check_on_output(lowerCAmelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () UpperCAmelCase : List[str] = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) UpperCAmelCase : Tuple = False UpperCAmelCase : int = False UpperCAmelCase : int = False UpperCAmelCase : List[Any] = False def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = MaskFormerModelTester(self ) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _lowercase ( self : Optional[int] ) -> int: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ ) def _lowercase ( self : Optional[int] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase_ ) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''' ) def _lowercase ( self : Dict ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' ) def _lowercase ( self : str ) -> Optional[Any]: """simple docstring""" pass 
@unittest.skip(reason='''MaskFormer is not a generative model''' ) def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" pass @unittest.skip(reason='''MaskFormer does not use token embeddings''' ) def _lowercase ( self : int ) -> Tuple: """simple docstring""" pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) @slow def _lowercase ( self : Tuple ) -> List[str]: """simple docstring""" for model_name in ["facebook/maskformer-swin-small-coco"]: SCREAMING_SNAKE_CASE_ = MaskFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def _lowercase ( self : str ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = (self.model_tester.min_size,) * 2 SCREAMING_SNAKE_CASE_ = { '''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase_ ), '''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase_ ), '''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase_ ).long(), } SCREAMING_SNAKE_CASE_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = 
model(**lowerCAmelCase_ ) self.assertTrue(outputs.loss is not None ) def _lowercase ( self : Any ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowerCAmelCase_ , **lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ ) def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE_ = model_class(lowerCAmelCase_ ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase_ , output_attentions=lowerCAmelCase_ ) self.assertTrue(outputs.attentions is not None ) def _lowercase ( self : List[str] ) -> List[str]: """simple docstring""" if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss SCREAMING_SNAKE_CASE_ = self.all_model_classes[1] SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ).loss loss.backward() def _lowercase ( self : Any ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.all_model_classes[1] SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase_ , mask_labels=lowerCAmelCase_ , class_labels=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = 
outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE_ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't SCREAMING_SNAKE_CASE_ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() SCREAMING_SNAKE_CASE_ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowerCAmelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) A_ = 1e-4 def UpperCAmelCase ( )-> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' ) if is_vision_available() else None ) def _lowercase ( self : List[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], 
[-0.0_069, 0.3_385, -0.0_089]] ).to(lowerCAmelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(lowerCAmelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(lowerCAmelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) def _lowercase ( self : Tuple ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(lowerCAmelCase_ ) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase_ ) # masks_queries_logits SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) SCREAMING_SNAKE_CASE_ = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] SCREAMING_SNAKE_CASE_ = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, 
:3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) # class_queries_logits SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) SCREAMING_SNAKE_CASE_ = torch.tensor( [ [1.6512e00, -5.2572e00, -3.3519e00], [3.6169e-02, -5.9025e00, -2.9313e00], [1.0766e-04, -7.7630e00, -5.1263e00], ] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) def _lowercase ( self : int ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' ) .to(lowerCAmelCase_ ) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = prepare_img() SCREAMING_SNAKE_CASE_ = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowerCAmelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase_ ) # masks_queries_logits SCREAMING_SNAKE_CASE_ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) SCREAMING_SNAKE_CASE_ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] SCREAMING_SNAKE_CASE_ = torch.tensor(lowerCAmelCase_ ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) # class_queries_logits SCREAMING_SNAKE_CASE_ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, 
model.config.num_labels + 1) ) SCREAMING_SNAKE_CASE_ = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) def _lowercase ( self : str ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' ) .to(lowerCAmelCase_ ) .eval() ) SCREAMING_SNAKE_CASE_ = self.default_image_processor SCREAMING_SNAKE_CASE_ = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , ) SCREAMING_SNAKE_CASE_ = inputs['''pixel_values'''].to(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = [el.to(lowerCAmelCase_ ) for el in inputs['''mask_labels''']] SCREAMING_SNAKE_CASE_ = [el.to(lowerCAmelCase_ ) for el in inputs['''class_labels''']] with torch.no_grad(): SCREAMING_SNAKE_CASE_ = model(**lowerCAmelCase_ ) self.assertTrue(outputs.loss is not None )
393
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Dict ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ = mock.Mock() SCREAMING_SNAKE_CASE_ = 500 SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = HTTPError SCREAMING_SNAKE_CASE_ = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase_ ) as mock_head: SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = mock.Mock() SCREAMING_SNAKE_CASE_ = 500 SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = HTTPError SCREAMING_SNAKE_CASE_ = {} # Download this model to make sure it's in the cache. 
SCREAMING_SNAKE_CASE_ = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase_ ) as mock_head: SCREAMING_SNAKE_CASE_ = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def _lowercase ( self : Dict ) -> Optional[int]: """simple docstring""" try: SCREAMING_SNAKE_CASE_ = tempfile.mktemp() with open(lowerCAmelCase_ , '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = AlbertTokenizer.from_pretrained(lowerCAmelCase_ ) finally: os.remove(lowerCAmelCase_ ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''' , '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size , 1_000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('''tokenizer.json''' ) def _lowercase ( self : int ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class snake_case ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def _lowercase ( cls : Optional[Any] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = TOKEN HfFolder.save_token(lowerCAmelCase_ ) @classmethod def _lowercase ( cls : Optional[int] ) -> Union[str, Any]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def _lowercase ( self : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' ) with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ = BertTokenizer(lowerCAmelCase_ ) tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(lowerCAmelCase_ , repo_id='''test-tokenizer''' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab 
, tokenizer.vocab ) def _lowercase ( self : Dict ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' ) with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ = BertTokenizer(lowerCAmelCase_ ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( lowerCAmelCase_ , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab ) @require_tokenizers def _lowercase ( self : Any ) -> List[Any]: """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' ) with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ = CustomTokenizer(lowerCAmelCase_ ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ 
, '''CustomTokenizer''' ) # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE_ = os.path.join(lowerCAmelCase_ , '''vocab.txt''' ) with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) SCREAMING_SNAKE_CASE_ = BertTokenizerFast.from_pretrained(lowerCAmelCase_ ) bert_tokenizer.save_pretrained(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE_ = CustomTokenizerFast.from_pretrained(lowerCAmelCase_ ) tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' ) SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''' , use_fast=lowerCAmelCase_ , trust_remote_code=lowerCAmelCase_ ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' ) class snake_case ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def _lowercase ( self : Union[str, Any] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This 
is a extra_id_100'''] ) trie.add('''[CLS]''' ) trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] ) def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _lowercase ( self : List[str] ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] ) def _lowercase ( self : Any ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ = Trie() SCREAMING_SNAKE_CASE_ = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] ) self.assertEqual(lowerCAmelCase_ , ['''AB''', '''C'''] )
393
1
# Standard Base64 alphabet (RFC 4648), implemented by hand for didactic purposes.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backward-compatible alias: earlier revisions exposed the charset under this name.
__magic_name__ = B64_CHARSET


def SCREAMING_SNAKE_CASE__(data):
    """Encode a bytes-like object to Base64.

    NOTE: this definition is shadowed by the decoder below, which reuses the
    same (machine-generated) name; only the decoder is reachable at module level.

    Raises:
        TypeError: if ``data`` is not bytes.
    """
    # Fix: the original compared `isinstance(data, data)`, which is meaningless.
    if not isinstance(data, bytes):
        raise TypeError(f"""a bytes-like object is required, not '{data.__class__.__name__}'""")

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later ('=' per two missing bits).
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def SCREAMING_SNAKE_CASE__(encoded_data):  # noqa: F811 — name reuse kept from the original file
    """Decode Base64-encoded ``str`` or ASCII ``bytes`` back to raw bytes.

    Raises:
        TypeError: for non-str/bytes input.
        ValueError: for non-ASCII bytes input.
        AssertionError: for invalid characters or incorrect padding.
    """
    # Make sure encoded_data is either a string or a bytes-like object.
    # Fix: the original tested `isinstance(x, x)` twice instead of (bytes, str).
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        raise TypeError(
            "argument should be a bytes-like object or ASCII string, "
            f"""not '{encoded_data.__class__.__name__}'"""
        )

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding (before stripping it, so length includes the '=' signs)
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one, and drop the filler bits it stood for.
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
530
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor, reduced over dim=1."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """BERT encoder augmented with one "highway" early-exit head per layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # Entropy threshold per layer below which inference exits early; -1 disables.
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        # A scalar sets the same threshold for every layer; a sequence sets them all.
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        # Copy the main model pooler's weights into every highway pooler.
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # Early exit: abort the forward pass via an exception carrying the outputs.
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads of the model. heads_to_prune: {layer_num: [heads to prune in this layer]}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Control-flow exception used by DeeBertEncoder to abort the forward pass on an early exit."""

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """Early-exit head: pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output


@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early exit fired inside the encoder; recover its outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs  # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
530
1
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def _A ( ): """simple docstring""" lowerCAmelCase__ = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores" , type=lowerCAmelCase_ , default=1 , help="Number of TPU cores to use (1 or 8)." ) # positional parser.add_argument( "training_script" , type=lowerCAmelCase_ , help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ) , ) # rest from the training program parser.add_argument("training_script_args" , nargs=lowerCAmelCase_ ) return parser.parse_args() def _A ( ): """simple docstring""" lowerCAmelCase__ = parse_args() # Import training_script as a module. lowerCAmelCase__ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCAmelCase__ = script_fpath.stem lowerCAmelCase__ = importlib.import_module(lowerCAmelCase_ ) # Patch sys.argv lowerCAmelCase__ = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
61
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=SCREAMING_SNAKE_CASE__ ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __lowerCamelCase = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) __lowerCamelCase = Features({"audio": Audio()} ) __lowerCamelCase = Features({"labels": ClassLabel} ) __lowerCamelCase = "audio" __lowerCamelCase = "labels" def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , snake_case__ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) lowercase__ : int= copy.deepcopy(self ) lowercase__ : int= self.label_schema.copy() lowercase__ : Any= features[self.label_column] lowercase__ : int= label_schema return task_template @property def UpperCAmelCase_ ( self ): '''simple docstring''' return { self.audio_column: "audio", self.label_column: "labels", }
218
0
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class A_(unittest.TestCase):
    """Unit tests for DisjunctiveConstraint input validation and stepwise progression."""

    def test_input_types(self):
        # For consistency across different branches of checking.
        token_ids = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(token_ids)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One sequence being a prefix of another is rejected at construction time.
        token_ids = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(token_ids)  # fails here

    def test_example_progression(self):
        token_ids = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(token_ids)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        token_ids = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(token_ids)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
702
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A directed edge; ``weight`` is restricted to 0 or 1 for 0-1 BFS."""

    destination_vertex: int
    weight: int


class A_:
    """Directed graph with 0/1 edge weights supporting 0-1 BFS shortest paths.

    The original code was broken: both classes were named ``A_`` (the second
    shadowed the first even though the body constructs ``Edge``), the dataclass
    declared the same field twice, all three methods shared one name, and the
    BFS body read undefined locals. Names are restored from the internal
    references (``Edge``, ``edge.weight``, ``edge.destination_vertex``).
    """

    def __init__(self, size: int) -> None:
        # One adjacency list per vertex.
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving *vertex*."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Insert a directed edge from *from_vertex* to *to_vertex*.

        Raises:
            ValueError: if the weight is not 0 or 1, or the target vertex
                is out of range.
        """
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """Return the 0-1 BFS shortest distance from start to finish.

        Zero-weight edges are pushed to the *front* of the deque and
        one-weight edges to the back, which yields shortest paths without
        a priority queue.

        Raises:
            ValueError: if finish_vertex is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if we already know an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
72
0
"""simple docstring""" from __future__ import annotations def __snake_case ( _lowercase ): """simple docstring""" return len(set(_lowercase ) ) == len(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod()
34
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem A = importlib.util.find_spec('s3fs') is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 A = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str ): """simple docstring""" if "://" in dataset_path: snake_case : List[str] = dataset_path.split("://" )[1] return dataset_path def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: fsspec.AbstractFileSystem ): """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: fsspec.AbstractFileSystem , lowerCamelCase_: str , lowerCamelCase_: str ): """simple docstring""" snake_case : int = not is_remote_filesystem(lowerCamelCase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowerCamelCase_ ) , fs._strip_protocol(lowerCamelCase_ ) ) else: fs.mv(lowerCamelCase_ , lowerCamelCase_ , recursive=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( ): """simple docstring""" if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: snake_case : Union[str, Any] = None snake_case : Optional[int] = None snake_case : List[str] = threading.Lock()
449
0
"""Solve systems of simultaneous linear equations by Gaussian elimination.

The original obfuscation gave both functions the same name (so ``simplify``
could never be called recursively and ``solve_simultaneous`` — referenced by
the error messages and the ``__main__`` block — did not exist) and destroyed
every assignment target. Names are restored from those internal references.
"""


def simplify(current_set: list[list]) -> list[list]:
    """One elimination round: normalise each row by its leading coefficient,
    subtract the first row to cancel the leading column, then recurse on the
    remaining sub-system until rows are reduced to length 3.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> leading 1s.
    normalised = []
    for row in current_set:
        magnitude = row[0]
        if magnitude == 0:
            # Row already has a zero leading term; keep it untouched.
            normalised.append(list(row))
        else:
            normalised.append([column / magnitude for column in row])

    # Subtract to cancel term
    first_row = normalised[0]
    final_set = [first_row]
    for row in normalised[1:]:
        # If first term is 0, it is already in the form we want, so preserve it.
        if row[0] == 0:
            final_set.append(row)
            continue
        final_set.append([first_row[i] - row[i] for i in range(len(row))])

    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1:]:
            current_first_column.append(row[0])
            next_iteration.append(row[1:])
        resultant = simplify(next_iteration)
        # Re-attach the cancelled first column and the saved first row.
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n simultaneous linear equations.

    Each equation is a row of n coefficients followed by the constant term.
    Returns the solution values in variable order, rounded to 5 decimals.

    >>> solve_simultaneous([[4, 2]])
    [0.5]

    Raises:
        IndexError: if the shape is not n rows of length n+1.
        ValueError: on non-numeric entries or when no zero-free pivot row exists.
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        # Single equation: a*x = c  =>  x = c / a.
        return [equations[0][-1] / equations[0][0]]

    data_set = equations.copy()
    # Put a zero-free ("full") equation first so normalisation never divides by 0.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)

    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]

    # Back-substitute from the last (fully reduced) row upwards.
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        # Guard against an all-zero coefficient row (would otherwise IndexError).
        while temp_row and temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1:]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)

    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
512
"""Render the Mandelbrot set to a PIL image.

The original obfuscation collapsed all four functions onto one name, while
``get_image`` and ``__main__`` call ``get_distance``/``get_color_coded_rgb``/
``get_black_and_white_rgb``/``get_image`` — those names are restored here.
"""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Iterate z -> z^2 + c at c=(x, y) and return the normalised escape step.

    Returns a value in [0, 1]; 1 means the point never diverged within
    *max_step* iterations (i.e. it is inside the set).
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; otherwise an HSV hue keyed by escape speed."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Render the Mandelbrot figure, mapping pixels to the complex plane."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # Loop-invariant: aspect-ratio-preserving figure height (hoisted out of the
    # per-pixel loops where the original recomputed it every iteration).
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    #                 figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
512
1
# NOTE(review): obfuscated conversion script — fairseq X-MOD checkpoint -> HF Transformers.
# Identifier names appear machine-mangled: every constant is bound to `a_`, every local to
# `__lowerCamelCase`, and the function's parameters to `_UpperCamelCase`, so later reads of
# `xmod`, `config`, `model`, `parser`, etc. reference names that are never bound. The exact
# original assignment targets cannot be reconstructed safely from this view, so the code is
# left byte-identical; restore from the upstream `convert_xmod_original_pytorch_checkpoint_to_pytorch`
# script before attempting to run it.
import argparse from pathlib import Path import fairseq import torch from fairseq.models.xmod import XMODModel as FairseqXmodModel from packaging import version from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("""0.12.2"""): raise Exception("""requires fairseq >= 0.12.2""") if version.parse(fairseq.__version__) > version.parse("""2"""): raise Exception("""requires fairseq < v2""") logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = """Hello, World!""" a_ = """en_XX""" def a__ ( _UpperCamelCase : str ,_UpperCamelCase : str ,_UpperCamelCase : bool ): __lowerCamelCase = Path('''data_bin''' ) __lowerCamelCase = FairseqXmodModel.from_pretrained( model_name_or_path=str(Path(_UpperCamelCase ).parent ) ,checkpoint_file=Path(_UpperCamelCase ).name ,_name='''xmod_base''' ,arch='''xmod_base''' ,task='''multilingual_masked_lm''' ,data_name_or_path=str(_UpperCamelCase ) ,bpe='''sentencepiece''' ,sentencepiece_model=str(Path(_UpperCamelCase ).parent / '''sentencepiece.bpe.model''' ) ,src_dict=str(data_dir / '''dict.txt''' ) ,) xmod.eval() # disable dropout print(_UpperCamelCase ) __lowerCamelCase = xmod.model.encoder.sentence_encoder __lowerCamelCase = XmodConfig( vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings ,hidden_size=xmod.cfg.model.encoder_embed_dim ,num_hidden_layers=xmod.cfg.model.encoder_layers ,num_attention_heads=xmod.cfg.model.encoder_attention_heads ,intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=5_14 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,pre_norm=xmod.cfg.model.encoder_normalize_before ,adapter_reduction_factor=getattr(xmod.cfg.model ,'''bottleneck''' ,2 ) ,adapter_layer_norm=xmod.cfg.model.adapter_layer_norm ,adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm ,ln_before_adapter=xmod.cfg.model.ln_before_adapter ,languages=xmod.cfg.model.languages ,) if
classification_head: __lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our X-MOD config:''' ,_UpperCamelCase ) __lowerCamelCase = XmodForSequenceClassification(_UpperCamelCase ) if classification_head else XmodForMaskedLM(_UpperCamelCase ) model.eval() # Now let's copy all the weights. # Embeddings __lowerCamelCase = xmod_sent_encoder.embed_tokens.weight __lowerCamelCase = xmod_sent_encoder.embed_positions.weight __lowerCamelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them. __lowerCamelCase = xmod_sent_encoder.layernorm_embedding.weight __lowerCamelCase = xmod_sent_encoder.layernorm_embedding.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __lowerCamelCase = model.roberta.encoder.layer[i] __lowerCamelCase = xmod_sent_encoder.layers[i] # self attention __lowerCamelCase = layer.attention.self if not ( xmod_layer.self_attn.k_proj.weight.data.shape == xmod_layer.self_attn.q_proj.weight.data.shape == xmod_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ): raise AssertionError('''Dimensions of self-attention weights do not match.''' ) __lowerCamelCase = xmod_layer.self_attn.q_proj.weight __lowerCamelCase = xmod_layer.self_attn.q_proj.bias __lowerCamelCase = xmod_layer.self_attn.k_proj.weight __lowerCamelCase = xmod_layer.self_attn.k_proj.bias __lowerCamelCase = xmod_layer.self_attn.v_proj.weight __lowerCamelCase = xmod_layer.self_attn.v_proj.bias # self-attention output __lowerCamelCase = layer.attention.output if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape: raise AssertionError('''Dimensions of self-attention output weights do not match.''' ) __lowerCamelCase = xmod_layer.self_attn.out_proj.weight __lowerCamelCase = xmod_layer.self_attn.out_proj.bias __lowerCamelCase = xmod_layer.self_attn_layer_norm.weight __lowerCamelCase =
xmod_layer.self_attn_layer_norm.bias # intermediate __lowerCamelCase = layer.intermediate if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of intermediate weights do not match.''' ) __lowerCamelCase = xmod_layer.fca.weight __lowerCamelCase = xmod_layer.fca.bias # output __lowerCamelCase = layer.output if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape: raise AssertionError('''Dimensions of feed-forward weights do not match.''' ) __lowerCamelCase = xmod_layer.fca.weight __lowerCamelCase = xmod_layer.fca.bias __lowerCamelCase = xmod_layer.final_layer_norm.weight __lowerCamelCase = xmod_layer.final_layer_norm.bias if bert_output.adapter_layer_norm is not None: __lowerCamelCase = xmod_layer.adapter_layer_norm.weight __lowerCamelCase = xmod_layer.adapter_layer_norm.bias if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ): raise AssertionError('''Lists of language adapters do not match.''' ) for lang_code, adapter in xmod_layer.adapter_modules.items(): __lowerCamelCase = bert_output.adapter_modules[lang_code] __lowerCamelCase = xmod_layer.adapter_modules[lang_code] __lowerCamelCase = from_adapter.fca.weight __lowerCamelCase = from_adapter.fca.bias __lowerCamelCase = from_adapter.fca.weight __lowerCamelCase = from_adapter.fca.bias # end of layer if xmod_sent_encoder.layer_norm is not None: __lowerCamelCase = xmod_sent_encoder.layer_norm.weight __lowerCamelCase = xmod_sent_encoder.layer_norm.bias if classification_head: __lowerCamelCase = xmod.model.classification_heads['''mnli'''].dense.weight __lowerCamelCase = xmod.model.classification_heads['''mnli'''].dense.bias __lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.weight __lowerCamelCase = xmod.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head __lowerCamelCase = xmod.model.encoder.lm_head.dense.weight __lowerCamelCase = xmod.model.encoder.lm_head.dense.bias __lowerCamelCase
= xmod.model.encoder.lm_head.layer_norm.weight __lowerCamelCase = xmod.model.encoder.lm_head.layer_norm.bias __lowerCamelCase = xmod.model.encoder.lm_head.weight __lowerCamelCase = xmod.model.encoder.lm_head.bias # Let's check that we get the same results. __lowerCamelCase = xmod.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1 model.roberta.set_default_language(_UpperCamelCase ) __lowerCamelCase = model(_UpperCamelCase )[0] if classification_head: __lowerCamelCase = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_UpperCamelCase ) ) else: __lowerCamelCase = xmod.model(_UpperCamelCase ,lang_id=[SAMPLE_LANGUAGE] )[0] print(our_output.shape ,their_output.shape ) __lowerCamelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7 __lowerCamelCase = torch.allclose(_UpperCamelCase ,_UpperCamelCase ,atol=1e-3 ) print('''Do both models output the same tensors?''' ,'''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase ,exist_ok=_UpperCamelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--xmod_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--classification_head""", action="""store_true""", help="""Whether to convert a final classification head.""" ) a_ = parser.parse_args() convert_xmod_checkpoint_to_pytorch( args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
175
# NOTE(review): obfuscated conversion script — Swin2SR super-resolution checkpoint -> HF Transformers.
# As with the X-MOD script above in this dump, identifier names are machine-mangled (`a_`,
# `__lowerCamelCase`, `_UpperCamelCase`): the LHS of every assignment — including what were
# originally dict-key targets such as `orig_state_dict[...] = val[:dim, :]` in the qkv split —
# was replaced with a dummy local, so reads of `config`, `name`, `val`, `parser`, etc. reference
# names that are never bound. Reconstructing the exact targets from this view is unsafe; the
# code is left byte-identical. Restore from the upstream `convert_swin2sr_original_to_pytorch`
# script before attempting to run it.
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def a__ ( _UpperCamelCase : List[str] ): __lowerCamelCase = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __lowerCamelCase = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __lowerCamelCase = 4 __lowerCamelCase = 48 __lowerCamelCase = '''pixelshuffle_aux''' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __lowerCamelCase = [6, 6, 6, 6] __lowerCamelCase = 60 __lowerCamelCase = [6, 6, 6, 6] __lowerCamelCase = '''pixelshuffledirect''' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __lowerCamelCase = 4 __lowerCamelCase = '''nearest+conv''' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __lowerCamelCase = 1 __lowerCamelCase = 1 __lowerCamelCase = 1_26 __lowerCamelCase = 7 __lowerCamelCase = 255.0 __lowerCamelCase = '''''' return config def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : List[str] ): if "patch_embed.proj" in name and "layers" not in name: __lowerCamelCase = name.replace('''patch_embed.proj''' ,'''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: __lowerCamelCase = name.replace('''patch_embed.norm''' ,'''embeddings.patch_embeddings.layernorm''' ) if "layers" in name: __lowerCamelCase = name.replace('''layers''' ,'''encoder.stages''' ) if "residual_group.blocks" in name: __lowerCamelCase = name.replace('''residual_group.blocks''' ,'''layers''' ) if "attn.proj" in name: __lowerCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' ) if "attn" in name: __lowerCamelCase = name.replace('''attn''' ,'''attention.self''' ) if "norm1" in name: __lowerCamelCase = name.replace('''norm1''' ,'''layernorm_before''' ) if "norm2" in name: __lowerCamelCase = name.replace('''norm2''' ,'''layernorm_after''' ) if "mlp.fc1" in name: __lowerCamelCase =
name.replace('''mlp.fc1''' ,'''intermediate.dense''' ) if "mlp.fc2" in name: __lowerCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' ) if "q_bias" in name: __lowerCamelCase = name.replace('''q_bias''' ,'''query.bias''' ) if "k_bias" in name: __lowerCamelCase = name.replace('''k_bias''' ,'''key.bias''' ) if "v_bias" in name: __lowerCamelCase = name.replace('''v_bias''' ,'''value.bias''' ) if "cpb_mlp" in name: __lowerCamelCase = name.replace('''cpb_mlp''' ,'''continuous_position_bias_mlp''' ) if "patch_embed.proj" in name: __lowerCamelCase = name.replace('''patch_embed.proj''' ,'''patch_embed.projection''' ) if name == "norm.weight": __lowerCamelCase = '''layernorm.weight''' if name == "norm.bias": __lowerCamelCase = '''layernorm.bias''' if "conv_first" in name: __lowerCamelCase = name.replace('''conv_first''' ,'''first_convolution''' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __lowerCamelCase = name.replace('''conv_last''' ,'''final_convolution''' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __lowerCamelCase = name.replace('''conv_before_upsample.0''' ,'''conv_before_upsample''' ) if "upsample.0" in name: __lowerCamelCase = name.replace('''upsample.0''' ,'''upsample.convolution_0''' ) if "upsample.2" in name: __lowerCamelCase = name.replace('''upsample.2''' ,'''upsample.convolution_1''' ) __lowerCamelCase = '''upsample.''' + name elif config.upsampler == "pixelshuffledirect": __lowerCamelCase = name.replace('''upsample.0.weight''' ,'''upsample.conv.weight''' ) __lowerCamelCase = name.replace('''upsample.0.bias''' ,'''upsample.conv.bias''' ) else: pass else: __lowerCamelCase = '''swin2sr.''' + name return name def a__ ( _UpperCamelCase : List[Any] ,_UpperCamelCase : Union[str, Any] ): for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_UpperCamelCase ) if "qkv" in key: __lowerCamelCase = key.split('''.''' ) __lowerCamelCase = int(key_split[1] ) __lowerCamelCase = int(key_split[4] ) __lowerCamelCase = config.embed_dim if "weight" in key: __lowerCamelCase = val[:dim, :] __lowerCamelCase = val[dim : dim * 2, :] __lowerCamelCase = val[-dim:, :] else: __lowerCamelCase = val[:dim] __lowerCamelCase = val[dim : dim * 2] __lowerCamelCase = val[-dim:] pass else: __lowerCamelCase = val return orig_state_dict def a__ ( _UpperCamelCase : str ,_UpperCamelCase : int ,_UpperCamelCase : Any ): __lowerCamelCase = get_config(_UpperCamelCase ) __lowerCamelCase = SwinaSRForImageSuperResolution(_UpperCamelCase ) model.eval() __lowerCamelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location='''cpu''' ) __lowerCamelCase = convert_state_dict(_UpperCamelCase ,_UpperCamelCase ) __lowerCamelCase ,__lowerCamelCase = model.load_state_dict(_UpperCamelCase ,strict=_UpperCamelCase ) if len(_UpperCamelCase ) > 0: raise ValueError('''Missing keys when converting: {}'''.format(_UpperCamelCase ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(F"""Unexpected key {key} in state_dict""" ) # verify values __lowerCamelCase = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true''' __lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw ).convert('''RGB''' ) __lowerCamelCase = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __lowerCamelCase = 1_26 if '''Jpeg''' in checkpoint_url else 2_56 __lowerCamelCase = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] ,std=[0.229, 0.224, 0.225] ), ] ) __lowerCamelCase = transforms(_UpperCamelCase ).unsqueeze(0 ) if config.num_channels == 1: __lowerCamelCase = pixel_values[:, 0, :,
:].unsqueeze(1 ) __lowerCamelCase = model(_UpperCamelCase ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] ) __lowerCamelCase = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] ) __lowerCamelCase = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] ) __lowerCamelCase = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __lowerCamelCase = torch.Size([1, 3, 5_12, 5_12] ) __lowerCamelCase = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __lowerCamelCase = torch.Size([1, 3, 10_24, 10_24] ) __lowerCamelCase = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] ,_UpperCamelCase ,atol=1e-3 ) print('''Looks ok!''' ) __lowerCamelCase = { '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': ( '''swin2SR-classical-sr-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': ( '''swin2SR-classical-sr-x4-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': ( '''swin2SR-compressed-sr-x4-48'''
), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': ( '''swin2SR-lightweight-x2-64''' ), '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': ( '''swin2SR-realworld-sr-x4-64-bsrgan-psnr''' ), } __lowerCamelCase = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: model.push_to_hub(F"""caidas/{model_name}""" ) processor.push_to_hub(F"""caidas/{model_name}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""", type=str, help="""URL of the original Swin2SR checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""") a_ = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
175
1
# NOTE(review): obfuscated CTRL BPE tokenizer (Salesforce CTRL, Transformers-style).
# Method bodies read `UpperCamelCase_` while the parameters are declared `__magic_name__`,
# and the class references constants (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, base
# class `A_`) whose bindings were mangled — so `_tokenize`/`bpe`/`save_vocabulary` cannot run
# as written. The BPE merge loop's exact statement order is load-bearing; the code is left
# byte-identical rather than risk a behavior-changing reconstruction. Restore from the
# upstream `tokenization_ctrl.py` before use.
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } _lowercase = { "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"}, "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"}, } _lowercase = { "ctrl": 2_56, } _lowercase = { "Pregnancy": 16_86_29, "Christianity": 76_75, "Explain": 10_64_23, "Fitness": 6_34_40, "Saving": 6_31_63, "Ask": 2_71_71, "Ass": 9_59_85, "Joke": 16_35_09, "Questions": 4_56_22, "Thoughts": 4_96_05, "Retail": 5_23_42, "Feminism": 16_43_38, "Writing": 1_19_92, "Atheism": 19_22_63, "Netflix": 4_86_16, "Computing": 3_96_39, "Opinion": 4_32_13, "Alone": 4_49_67, "Funny": 5_89_17, "Gaming": 4_03_58, "Human": 40_88, "India": 13_31, "Joker": 7_71_38, "Diet": 3_62_06, "Legal": 1_18_59, "Norman": 49_39, "Tip": 7_26_89, "Weight": 5_23_43, "Movies": 4_62_73, "Running": 2_34_25, "Science": 20_90, "Horror": 3_77_93, "Confession": 6_05_72, "Finance": 1_22_50, "Politics": 1_63_60, "Scary": 19_19_85, "Support": 1_26_54, "Technologies": 3_25_16, "Teenage": 6_61_60, "Event": 3_27_69, "Learned": 6_74_60, "Notion": 18_27_70, "Wikipedia": 3_75_83, "Books": 66_65, "Extract": 7_60_50, "Confessions": 10_27_01, "Conspiracy": 7_59_32, "Links": 6_36_74, "Narcissus": 15_04_25, "Relationship": 5_47_66, "Relationships": 13_47_96, "Reviews": 4_16_71, "News": 42_56, "Translation": 2_68_20, "multilingual": 12_84_06, } def _A (UpperCamelCase : Union[str, Any] ) ->Any: '''simple docstring''' lowerCamelCase__ : Optional[Any] = set() lowerCamelCase__ : int = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase__ : Optional[int] = char lowerCamelCase__ : List[Any] = set(lowerCamelCase__ ) return pairs class __A ( A_ ): UpperCamelCase :List[Any] =
VOCAB_FILES_NAMES UpperCamelCase :Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase :Tuple = CONTROL_CODES def __init__(self , __magic_name__ , __magic_name__ , __magic_name__="<unk>" , **__magic_name__ ): super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ ) with open(UpperCamelCase_ , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase__ : Union[str, Any] = json.load(UpperCamelCase_ ) lowerCamelCase__ : Optional[int] = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding="""utf-8""" ) as merges_handle: lowerCamelCase__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1] lowerCamelCase__ : Optional[int] = [tuple(merge.split() ) for merge in merges] lowerCamelCase__ : str = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCamelCase__ : List[Any] = {} @property def _snake_case (self ): return len(self.encoder ) def _snake_case (self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case (self , __magic_name__ ): if token in self.cache: return self.cache[token] lowerCamelCase__ : List[Any] = tuple(UpperCamelCase_ ) lowerCamelCase__ : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) lowerCamelCase__ : Tuple = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCamelCase__ : int = min(UpperCamelCase_ , key=lambda __magic_name__ : self.bpe_ranks.get(UpperCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase__ : Optional[int] = bigram lowerCamelCase__ : int = [] lowerCamelCase__ : Optional[Any] = 0 while i < len(UpperCamelCase_ ): try: lowerCamelCase__ : Any = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase__ : Optional[Any] = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second )
i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase__ : Dict = tuple(UpperCamelCase_ ) lowerCamelCase__ : int = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCamelCase__ : str = get_pairs(UpperCamelCase_ ) lowerCamelCase__ : Any = "@@ ".join(UpperCamelCase_ ) lowerCamelCase__ : Union[str, Any] = word[:-4] lowerCamelCase__ : Optional[Any] = word return word def _snake_case (self , __magic_name__ ): lowerCamelCase__ : int = [] lowerCamelCase__ : str = re.findall(R"""\S+\n?""" , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(""" """ ) ) ) return split_tokens def _snake_case (self , __magic_name__ ): return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _snake_case (self , __magic_name__ ): return self.decoder.get(UpperCamelCase_ , self.unk_token ) def _snake_case (self , __magic_name__ ): lowerCamelCase__ : List[Any] = " ".join(UpperCamelCase_ ).replace("""@@ """ , """""" ).strip() return out_string def _snake_case (self , __magic_name__ , __magic_name__ = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowerCamelCase__ : int = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase__ : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + """\n""" ) lowerCamelCase__ : List[str] = 0 with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( f"Saving
vocabulary to {merge_file}: BPE merge indices are not consecutive." """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase__ : List[str] = token_index writer.write(""" """.join(UpperCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
710
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


_lowercase = logging.getLogger(__name__)


class __A ( A_ ):
    """PyTorch-Lightning module that fine-tunes a transformer for token
    classification (e.g. NER, POS).  The generic training loop comes from the
    base transformer class; this subclass supplies feature caching, data
    loaders and seqeval metrics.

    NOTE(review): this file appears machine-obfuscated — many assignment
    targets (``lowerCamelCase__``) do not match the names read later
    (``hparams``, ``outputs``, ``args`` …).  Verify against the original
    source before relying on runtime behavior.
    """

    # Task identifier used by the base class to pick the model head.
    UpperCamelCase :Optional[int] = '''token-classification'''

    def __init__(self , __magic_name__ ):
        """Resolve the TokenClassificationTask subclass and build the model.

        Accepts either a ``dict`` (converted to ``argparse.Namespace``) or a
        namespace of hyper-parameters.
        """
        if type(__magic_name__ ) == dict:
            lowerCamelCase__ : Any = Namespace(**__magic_name__ )
        # The task type (e.g. "NER") must name a TokenClassificationTask
        # subclass defined in the local ``tasks`` module.
        lowerCamelCase__ : str = import_module("""tasks""" )
        try:
            lowerCamelCase__ : Optional[Any] = getattr(__magic_name__ , hparams.task_type )
            lowerCamelCase__ : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
        lowerCamelCase__ : Any = self.token_classification_task.get_labels(hparams.labels )
        # The loss's ignore_index doubles as the label-padding marker that the
        # metric aggregation below filters out.
        lowerCamelCase__ : Tuple = CrossEntropyLoss().ignore_index
        super().__init__(__magic_name__ , len(self.labels ) , self.mode )

    def _snake_case (self , **__magic_name__ ):
        """Forward pass: delegate directly to the wrapped transformer model."""
        return self.model(**__magic_name__ )

    def _snake_case (self , __magic_name__ , __magic_name__ ):
        """Training step: run one batch and return the loss for Lightning."""
        lowerCamelCase__ : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            lowerCamelCase__ : Union[str, Any] = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        lowerCamelCase__ : List[str] = self(**__magic_name__ )
        lowerCamelCase__ : Dict = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def _snake_case (self ):
        """Create (or load from cache) the featurized dataset for each split."""
        lowerCamelCase__ : Dict = self.hparams
        for mode in ["train", "dev", "test"]:
            lowerCamelCase__ : List[str] = self._feature_file(__magic_name__ )
            if os.path.exists(__magic_name__ ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , __magic_name__ )
                lowerCamelCase__ : Union[str, Any] = torch.load(__magic_name__ )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                lowerCamelCase__ : int = self.token_classification_task.read_examples_from_file(args.data_dir , __magic_name__ )
                # Model-family specific feature layout: XLNet puts [CLS] at the
                # end and pads on the left; segment ids also differ.
                lowerCamelCase__ : Tuple = self.token_classification_task.convert_examples_to_features(
                    __magic_name__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__magic_name__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , __magic_name__ )
                torch.save(__magic_name__ , __magic_name__ )

    def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ = False ):
        """Load cached features for a split and wrap them in a DataLoader."""
        lowerCamelCase__ : Any = self._feature_file(__magic_name__ )
        logger.info("""Loading features from cached file %s""" , __magic_name__ )
        lowerCamelCase__ : Optional[Any] = torch.load(__magic_name__ )
        lowerCamelCase__ : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowerCamelCase__ : Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            lowerCamelCase__ : str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            # Models without token_type_ids still need a tensor of the right
            # length so TensorDataset columns line up.
            lowerCamelCase__ : int = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        lowerCamelCase__ : Any = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) , batch_size=__magic_name__ )

    def _snake_case (self , __magic_name__ , __magic_name__ ):
        """Validation step: collect loss, logits and gold labels for one batch."""
        lowerCamelCase__ : Optional[int] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            lowerCamelCase__ : Tuple = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        lowerCamelCase__ : str = self(**__magic_name__ )
        lowerCamelCase__ ,lowerCamelCase__ : List[Any] = outputs[:2]
        lowerCamelCase__ : List[Any] = logits.detach().cpu().numpy()
        lowerCamelCase__ : Dict = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _snake_case (self , __magic_name__
    ):
        """Aggregate per-batch validation outputs into seqeval span metrics.

        Positions labelled with the padding id are dropped before scoring so
        metrics only see real tokens.
        """
        lowerCamelCase__ : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        lowerCamelCase__ : Optional[int] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        lowerCamelCase__ : List[str] = np.argmax(__magic_name__ , axis=2 )
        lowerCamelCase__ : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        lowerCamelCase__ : Optional[int] = dict(enumerate(self.labels ) )
        lowerCamelCase__ : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
        lowerCamelCase__ : Any = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        lowerCamelCase__ : Tuple = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(__magic_name__ , __magic_name__ ),
            """precision""": precision_score(__magic_name__ , __magic_name__ ),
            """recall""": recall_score(__magic_name__ , __magic_name__ ),
            """f1""": fa_score(__magic_name__ , __magic_name__ ),
        }
        lowerCamelCase__ : Dict = dict(results.items() )
        lowerCamelCase__ : str = results
        return ret, preds_list, out_label_list

    def _snake_case (self , __magic_name__ ):
        """Validation epoch end: reduce batch outputs to Lightning's log dict."""
        # when stable
        lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : str = self._eval_end(__magic_name__ )
        lowerCamelCase__ : Union[str, Any] = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def _snake_case (self , __magic_name__ ):
        """Test epoch end: same aggregation as validation, relabelled for test."""
        # updating to test_epoch_end instead of deprecated test_end
        lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Union[str, Any] = self._eval_end(__magic_name__ )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        lowerCamelCase__ : List[Any] = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def _snake_case (__magic_name__ , __magic_name__ ):
        """Extend the argument parser with NER-specific CLI options."""
        # Add NER specific options
        BaseTransformer.add_model_specific_args(__magic_name__ , __magic_name__ )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=__magic_name__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" ,
            default=128 ,
            type=__magic_name__ ,
            help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" ,
            default="""""" ,
            type=__magic_name__ ,
            help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" ,
            default=0 ,
            type=__magic_name__ ,
            help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser


if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    _lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
    _lowercase = parser.parse_args()
    _lowercase = NERTransformer(args)
    _lowercase = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        _lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        _lowercase = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
96
0
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class SCREAMING_SNAKE_CASE__ (_a ):
    """Unit tests for ``DDPMParallelScheduler``: configuration sweeps,
    closed-form variance values, full denoising loops (epsilon and
    v-prediction), and custom-timestep validation.

    NOTE(review): locals are machine-obfuscated — assignment targets
    (``lowerCAmelCase__``) do not match the names read afterwards
    (``config``, ``scheduler`` …).  Check against the original source.
    """

    # Scheduler classes exercised by the shared SchedulerCommonTest helpers.
    lowercase_ : int = (DDPMParallelScheduler,)

    def A__ ( self : Optional[int] , **__lowerCamelCase : Union[str, Any] ):
        """Return a default scheduler config, with kwargs overriding entries."""
        lowerCAmelCase__ = {
            '''num_train_timesteps''': 10_00,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**__lowerCamelCase )
        return config

    def A__ ( self : List[Any] ):
        """Sweep num_train_timesteps values through the common config check."""
        for timesteps in [1, 5, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=__lowerCamelCase )

    def A__ ( self : List[Any] ):
        """Sweep matched (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )

    def A__ ( self : str ):
        """Sweep the supported beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__lowerCamelCase )

    def A__ ( self : str ):
        """Sweep the supported variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__lowerCamelCase )

    def A__ ( self : List[str] ):
        """Toggle sample clipping."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__lowerCamelCase )

    def A__ ( self : Tuple ):
        """Exercise thresholding combined with each prediction type."""
        self.check_over_configs(thresholding=__lowerCamelCase )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__lowerCamelCase , prediction_type=__lowerCamelCase , sample_max_value=__lowerCamelCase , )

    def A__ ( self : Optional[int] ):
        """Sweep the prediction types on their own."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__lowerCamelCase )

    def A__ ( self : Optional[int] ):
        """Sweep representative timesteps through the forward check."""
        for t in [0, 5_00, 9_99]:
            self.check_over_forward(time_step=__lowerCamelCase )

    def A__ ( self : Dict ):
        """Pin the closed-form variance at the first, a middle and the last step."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5

    def A__ ( self : str ):
        """Regression test for batch_step_no_noise on three stacked samples."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )
        lowerCAmelCase__ = len(__lowerCamelCase )

        lowerCAmelCase__ = self.dummy_model()
        lowerCAmelCase__ = self.dummy_sample_deter
        lowerCAmelCase__ = self.dummy_sample_deter + 0.1
        lowerCAmelCase__ = self.dummy_sample_deter - 0.1

        lowerCAmelCase__ = samplea.shape[0]
        lowerCAmelCase__ = torch.stack([samplea, samplea, samplea] , dim=0 )
        lowerCAmelCase__ = torch.arange(__lowerCamelCase )[0:3, None].repeat(1 , __lowerCamelCase )

        lowerCAmelCase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        lowerCAmelCase__ = scheduler.batch_step_no_noise(__lowerCamelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )

        lowerCAmelCase__ = torch.sum(torch.abs(__lowerCamelCase ) )
        lowerCAmelCase__ = torch.mean(torch.abs(__lowerCamelCase ) )

        # Hard-coded regression values for the deterministic dummy inputs.
        assert abs(result_sum.item() - 1153.1833 ) < 1e-2
        assert abs(result_mean.item() - 0.5005 ) < 1e-3

    def A__ ( self : int ):
        """Run the full reverse-diffusion loop and pin the output statistics."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )
        lowerCAmelCase__ = len(__lowerCamelCase )

        lowerCAmelCase__ = self.dummy_model()
        lowerCAmelCase__ = self.dummy_sample_deter
        lowerCAmelCase__ = torch.manual_seed(0 )

        for t in reversed(range(__lowerCamelCase ) ):
            # 1. predict noise residual
            lowerCAmelCase__ = model(__lowerCamelCase , __lowerCamelCase )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample

            lowerCAmelCase__ = pred_prev_sample

        lowerCAmelCase__ = torch.sum(torch.abs(__lowerCamelCase ) )
        lowerCAmelCase__ = torch.mean(torch.abs(__lowerCamelCase ) )

        assert abs(result_sum.item() - 258.9606 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3

    def A__ ( self : Tuple ):
        """Same full loop as above but with the v-prediction parameterization."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config(prediction_type='''v_prediction''' )
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )
        lowerCAmelCase__ = len(__lowerCamelCase )

        lowerCAmelCase__ = self.dummy_model()
        lowerCAmelCase__ = self.dummy_sample_deter
        lowerCAmelCase__ = torch.manual_seed(0 )

        for t in reversed(range(__lowerCamelCase ) ):
            # 1. predict noise residual
            lowerCAmelCase__ = model(__lowerCamelCase , __lowerCamelCase )

            # 2. predict previous mean of sample x_t-1
            lowerCAmelCase__ = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample

            lowerCAmelCase__ = pred_prev_sample

        lowerCAmelCase__ = torch.sum(torch.abs(__lowerCamelCase ) )
        lowerCAmelCase__ = torch.mean(torch.abs(__lowerCamelCase ) )

        assert abs(result_sum.item() - 202.0296 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3

    def A__ ( self : int ):
        """Check previous_timestep() walks a custom timestep list correctly."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )

        lowerCAmelCase__ = [1_00, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=__lowerCamelCase )

        lowerCAmelCase__ = scheduler.timesteps

        for i, timestep in enumerate(__lowerCamelCase ):
            if i == len(__lowerCamelCase ) - 1:
                # The final step has no predecessor; the scheduler reports -1.
                lowerCAmelCase__ = -1
            else:
                lowerCAmelCase__ = timesteps[i + 1]

            lowerCAmelCase__ = scheduler.previous_timestep(__lowerCamelCase )
            lowerCAmelCase__ = prev_t.item()

            self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def A__ ( self : Optional[Any] ):
        """Custom timesteps must be strictly descending — expect a raise."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )

        lowerCAmelCase__ = [1_00, 87, 50, 51, 0]

        with self.assertRaises(__lowerCamelCase , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=__lowerCamelCase )

    def A__ ( self : str ):
        """Passing both num_inference_steps and custom timesteps must raise."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )

        lowerCAmelCase__ = [1_00, 87, 50, 1, 0]
        lowerCAmelCase__ = len(__lowerCamelCase )

        with self.assertRaises(__lowerCamelCase , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=__lowerCamelCase , timesteps=__lowerCamelCase )

    def A__ ( self : Any ):
        """Timesteps at/above num_train_timesteps must raise."""
        lowerCAmelCase__ = self.scheduler_classes[0]
        lowerCAmelCase__ = self.get_scheduler_config()
        lowerCAmelCase__ = scheduler_class(**__lowerCamelCase )

        lowerCAmelCase__ = [scheduler.config.num_train_timesteps]

        # NOTE(review): the msg below is NOT an f-string — the braces and the
        # trailing '}}' are literal text, so the placeholder is never filled in.
        with self.assertRaises(
            __lowerCamelCase ,
            msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=__lowerCamelCase )
615
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


__magic_name__ : List[str] = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SCREAMING_SNAKE_CASE__ (_a ):
    """CLIP-based image encoder for the Paint-by-Example pipeline.

    Wraps a ``CLIPVisionModel``, runs its pooled output through a small
    transformer mapper, then layer-norms and projects to ``proj_size``.
    Also owns a learned unconditional embedding used for guidance.

    NOTE(review): locals are machine-obfuscated — assignment targets
    (``lowerCAmelCase__``) do not match the attribute names read later
    (``self.model``, ``self.mapper`` …).  Verify against the original source.
    """

    def __init__( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=7_68 ):
        super().__init__(__lowerCamelCase )
        lowerCAmelCase__ = proj_size

        lowerCAmelCase__ = CLIPVisionModel(__lowerCamelCase )
        lowerCAmelCase__ = PaintByExampleMapper(__lowerCamelCase )
        lowerCAmelCase__ = nn.LayerNorm(config.hidden_size )
        lowerCAmelCase__ = nn.Linear(config.hidden_size , self.proj_size )

        # uncondition for scaling
        lowerCAmelCase__ = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )

    def A__ ( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : str=False ):
        """Encode pixel values; optionally also return the learned uncond vector."""
        lowerCAmelCase__ = self.model(pixel_values=__lowerCamelCase )
        lowerCAmelCase__ = clip_output.pooler_output
        # Insert a singleton sequence dimension before the mapper blocks.
        lowerCAmelCase__ = self.mapper(latent_states[:, None] )
        lowerCAmelCase__ = self.final_layer_norm(__lowerCamelCase )
        lowerCAmelCase__ = self.proj_out(__lowerCamelCase )
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class SCREAMING_SNAKE_CASE__ (nn.Module ):
    """Stack of BasicTransformerBlocks that maps CLIP pooled features into
    the conditioning space expected by the diffusion UNet."""

    def __init__( self : Optional[Any] , __lowerCamelCase : Tuple ):
        super().__init__()
        # Depth scales with the encoder: one mapper block per five hidden layers.
        lowerCAmelCase__ = (config.num_hidden_layers + 1) // 5
        lowerCAmelCase__ = config.hidden_size
        lowerCAmelCase__ = 1
        lowerCAmelCase__ = nn.ModuleList(
            [
                BasicTransformerBlock(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , activation_fn='''gelu''' , attention_bias=__lowerCamelCase )
                for _ in range(__lowerCamelCase )
            ] )

    def A__ ( self : str , __lowerCamelCase : Optional[Any] ):
        """Apply each transformer block in sequence to the hidden states."""
        for block in self.blocks:
            lowerCAmelCase__ = block(__lowerCamelCase )

        return hidden_states
615
1
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() __A : Any = logging.get_logger(__name__) set_seed(770) __A : List[Any] = { "c_attn": "att_proj", "c_proj": "out_proj", "c_fc": "in_proj", "transformer.": "", "h.": "layers.", "ln_1": "layernorm_1", "ln_2": "layernorm_2", "ln_f": "layernorm_final", "wpe": "position_embeds_layer", "wte": "input_embeds_layer", } __A : int = { "text_small": { "repo_id": "suno/bark", "file_name": "text.pt", }, "coarse_small": { "repo_id": "suno/bark", "file_name": "coarse.pt", }, "fine_small": { "repo_id": "suno/bark", "file_name": "fine.pt", }, "text": { "repo_id": "suno/bark", "file_name": "text_2.pt", }, "coarse": { "repo_id": "suno/bark", "file_name": "coarse_2.pt", }, "fine": { "repo_id": "suno/bark", "file_name": "fine_2.pt", }, } __A : List[str] = os.path.dirname(os.path.abspath(__file__)) __A : List[str] = os.path.join(os.path.expanduser("~"), ".cache") __A : Union[str, Any] = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") def lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=False ): '''simple docstring''' _UpperCAmelCase = model_type if use_small: key += "_small" return os.path.join(_SCREAMING_SNAKE_CASE , REMOTE_MODEL_PATHS[key]['''file_name'''] ) def lowercase ( _SCREAMING_SNAKE_CASE : 
List[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , local_dir=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : Union[str, Any]="text" ): '''simple docstring''' if model_type == "text": _UpperCAmelCase = BarkSemanticModel _UpperCAmelCase = BarkSemanticConfig _UpperCAmelCase = BarkSemanticGenerationConfig elif model_type == "coarse": _UpperCAmelCase = BarkCoarseModel _UpperCAmelCase = BarkCoarseConfig _UpperCAmelCase = BarkCoarseGenerationConfig elif model_type == "fine": _UpperCAmelCase = BarkFineModel _UpperCAmelCase = BarkFineConfig _UpperCAmelCase = BarkFineGenerationConfig else: raise NotImplementedError() _UpperCAmelCase = f'{model_type}_small' if use_small else model_type _UpperCAmelCase = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info['''repo_id'''] , model_info['''file_name'''] ) _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE ) # this is a hack _UpperCAmelCase = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: _UpperCAmelCase = model_args['''vocab_size'''] _UpperCAmelCase = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments _UpperCAmelCase = model_args.pop('''n_head''' ) _UpperCAmelCase = model_args.pop('''n_embd''' ) _UpperCAmelCase = model_args.pop('''n_layer''' ) _UpperCAmelCase = ConfigClass(**checkpoint['''model_args'''] ) _UpperCAmelCase = ModelClass(config=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = GenerationConfigClass() _UpperCAmelCase = model_generation_config _UpperCAmelCase = checkpoint['''model'''] # fixup checkpoint _UpperCAmelCase = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(_SCREAMING_SNAKE_CASE ): # replace part of the key with corresponding layer name in HF implementation _UpperCAmelCase = k[len(_SCREAMING_SNAKE_CASE ) :] for old_layer_name in new_layer_name_dict: _UpperCAmelCase = new_k.replace(_SCREAMING_SNAKE_CASE , new_layer_name_dict[old_layer_name] ) _UpperCAmelCase = state_dict.pop(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = set(state_dict.keys() ) - set(model.state_dict().keys() ) _UpperCAmelCase = {k for k in extra_keys if not k.endswith('''.attn.bias''' )} _UpperCAmelCase = set(model.state_dict().keys() ) - set(state_dict.keys() ) _UpperCAmelCase = {k for k in missing_keys if not k.endswith('''.attn.bias''' )} if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(_SCREAMING_SNAKE_CASE ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = 
checkpoint['''best_val_loss'''].item() logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(_SCREAMING_SNAKE_CASE , 3 )} loss' ) model.eval() model.to(_SCREAMING_SNAKE_CASE ) del checkpoint, state_dict return model def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : Union[str, Any]="text" ): '''simple docstring''' if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() _UpperCAmelCase = '''cpu''' # do conversion on cpu _UpperCAmelCase = _get_ckpt_path(_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = _load_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE ) # load bark initial model _UpperCAmelCase = _bark_load_model(_SCREAMING_SNAKE_CASE , '''cpu''' , model_type=_SCREAMING_SNAKE_CASE , use_small=_SCREAMING_SNAKE_CASE ) if model_type == "text": _UpperCAmelCase = bark_model['''model'''] if model.num_parameters(exclude_embeddings=_SCREAMING_SNAKE_CASE ) != bark_model.get_num_params(): raise ValueError('''initial and new models don\'t have the same number of parameters''' ) # check if same output as the bark model _UpperCAmelCase = 5 _UpperCAmelCase = 10 if model_type in ["text", "coarse"]: _UpperCAmelCase = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int ) _UpperCAmelCase = bark_model(_SCREAMING_SNAKE_CASE )[0] _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE ) # take last logits _UpperCAmelCase = output_new_model_total.logits[:, [-1], :] else: _UpperCAmelCase = 3 _UpperCAmelCase = 8 _UpperCAmelCase = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) _UpperCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = bark_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = output_new_model_total.logits # output difference should come from the difference of 
self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError('''initial and new outputs don\'t have the same shape''' ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError('''initial and new outputs are not equal''' ) Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , ): '''simple docstring''' _UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BarkSemanticConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , '''config.json''' ) ) _UpperCAmelCase = BarkCoarseConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , '''config.json''' ) ) _UpperCAmelCase = BarkFineConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , '''config.json''' ) ) _UpperCAmelCase = EncodecConfig.from_pretrained('''facebook/encodec_24khz''' ) _UpperCAmelCase = BarkSemanticModel.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BarkCoarseModel.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BarkFineModel.from_pretrained(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = EncodecModel.from_pretrained('''facebook/encodec_24khz''' ) _UpperCAmelCase = BarkConfig.from_sub_model_configs( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) _UpperCAmelCase = BarkModel(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = semantic _UpperCAmelCase = coarseAcoustic _UpperCAmelCase = fineAcoustic _UpperCAmelCase = codec _UpperCAmelCase = bark_generation_config 
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE ) bark.save_pretrained(_SCREAMING_SNAKE_CASE , repo_id=_SCREAMING_SNAKE_CASE , push_to_hub=_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument("model_type", type=str, help="text, coarse or fine.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.") __A : Tuple = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
95
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = StableDiffusionXLImgaImgPipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCamelCase__ = PipelineTesterMixin.required_optional_params - {"""latents"""} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowercase__ ( self : Tuple )->int: torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=__UpperCamelCase , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , ) _UpperCAmelCase = EulerDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( 
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=3_2 , ) _UpperCAmelCase = CLIPTextModel(__UpperCamelCase ) _UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCamelCase ) _UpperCAmelCase = CLIPTextModelWithProjection(__UpperCamelCase ) _UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=__UpperCamelCase ) _UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def lowercase__ ( self : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : int=0 )->Any: _UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCAmelCase = image / 2 + 0.5 if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCAmelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.7_5, } return inputs def lowercase__ ( self : Any )->int: _UpperCAmelCase = '''cpu''' # ensure determinism for the 
device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = StableDiffusionXLImgaImgPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = sd_pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) _UpperCAmelCase = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowercase__ ( self : Optional[Any] )->Optional[int]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def lowercase__ ( self : Optional[Any] )->List[Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowercase__ ( self : List[str] )->str: pass def lowercase__ ( self : Dict )->List[str]: _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = StableDiffusionXLImgaImgPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) # forward without prompt embeds _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = 3 * ['''this is a negative prompt'''] _UpperCAmelCase = negative_prompt _UpperCAmelCase = 3 * [inputs['''prompt''']] _UpperCAmelCase = sd_pipe(**__UpperCamelCase ) _UpperCAmelCase = output.images[0, -3:, -3:, -1] # forward with prompt embeds _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = 3 * ['''this is a negative prompt'''] _UpperCAmelCase = 3 * [inputs.pop('''prompt''' )] ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = sd_pipe.encode_prompt(__UpperCamelCase , negative_prompt=__UpperCamelCase ) _UpperCAmelCase = sd_pipe( **__UpperCamelCase , 
prompt_embeds=__UpperCamelCase , negative_prompt_embeds=__UpperCamelCase , pooled_prompt_embeds=__UpperCamelCase , negative_pooled_prompt_embeds=__UpperCamelCase , ) _UpperCAmelCase = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : Union[str, Any] )->Any: super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict="cpu" , __UpperCamelCase : int=torch.floataa , __UpperCamelCase : Optional[Any]=0 )->Union[str, Any]: _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCAmelCase = np.random.RandomState(__UpperCamelCase ).standard_normal((1, 4, 6_4, 6_4) ) _UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase , dtype=__UpperCamelCase ) _UpperCAmelCase = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def lowercase__ ( self : Optional[Any] )->Optional[Any]: _UpperCAmelCase = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_inputs(__UpperCamelCase ) _UpperCAmelCase = pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
95
1
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class __lowercase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : int=56 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[str]=99 , UpperCamelCase_ : Dict=32 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]="gelu_new" , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Dict=512 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : str="block_sparse" , UpperCamelCase_ : str=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Tuple=3 , ): """simple docstring""" __A = parent __A = batch_size __A = seq_length __A = is_training __A = use_attention_mask __A = use_token_type_ids __A = use_labels __A = vocab_size __A = hidden_size __A = num_hidden_layers __A = num_attention_heads __A = intermediate_size __A = hidden_act __A = hidden_dropout_prob __A = attention_probs_dropout_prob __A = max_position_embeddings __A = type_vocab_size __A = type_sequence_label_size __A = initializer_range __A = num_choices __A = rescale_embeddings __A = attention_type __A = use_bias 
__A = block_size __A = num_random_blocks def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" __A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A = None if self.use_attention_mask: __A = random_attention_mask([self.batch_size, self.seq_length] ) __A = None if self.use_token_type_ids: __A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __A = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" __A = self.prepare_config_and_inputs() __A = config_and_inputs __A = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask, } return config, inputs_dict @require_flax class __lowercase ( _UpperCAmelCase , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( ( FlaxBigBirdForCausalLM, FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" __A = FlaxBigBirdModelTester(self ) @slow # copied from 
`test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" super().test_hidden_states_output() @slow def lowerCAmelCase_ ( self : Tuple ): """simple docstring""" for model_class_name in self.all_model_classes: __A = model_class_name.from_pretrained("""google/bigbird-roberta-base""" ) self.assertIsNotNone(_lowercase ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" __A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __A = self._prepare_for_class(_lowercase , _lowercase ) __A = model_class(_lowercase ) @jax.jit def model_jitted(UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : Tuple ): return model(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase ) with self.subTest("""JIT Enabled""" ): __A = model_jitted(**_lowercase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __A = model_jitted(**_lowercase ).to_tuple() self.assertEqual(len(_lowercase ) , len(_lowercase ) ) for 
jitted_output, output in zip(_lowercase , _lowercase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=1e-5 , UpperCamelCase_ : Dict="outputs" , UpperCamelCase_ : Optional[Any]=None ): """simple docstring""" if name.startswith("""outputs.attentions""" ): return else: super().check_pt_flax_outputs(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
637
import pickle import numpy as np from matplotlib import pyplot as plt class lowercase : def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ): SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2] SCREAMING_SNAKE_CASE__ : str = conva_get[2] SCREAMING_SNAKE_CASE__ : Any = size_pa SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w SCREAMING_SNAKE_CASE__ : Tuple = rate_t SCREAMING_SNAKE_CASE__ : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1 SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1 SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1 def lowercase__ ( self : Union[str, Any] , _lowercase : Any ): # save model dict with pickle SCREAMING_SNAKE_CASE__ : Dict = { '''num_bp1''': self.num_bpa, '''num_bp2''': self.num_bpa, '''num_bp3''': self.num_bpa, '''conv1''': self.conva, '''step_conv1''': self.step_conva, '''size_pooling1''': self.size_poolinga, '''rate_weight''': self.rate_weight, '''rate_thre''': self.rate_thre, '''w_conv1''': self.w_conva, '''wkj''': self.wkj, '''vji''': self.vji, '''thre_conv1''': self.thre_conva, '''thre_bp2''': self.thre_bpa, '''thre_bp3''': self.thre_bpa, } with open(_lowercase , '''wb''' ) as f: pickle.dump(_lowercase , _lowercase ) print(f"""Model saved: {save_path}""" ) @classmethod def lowercase__ ( cls : Dict , _lowercase : int ): # read saved model with open(_lowercase , '''rb''' ) as f: 
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301 SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' ) conv_get.append(model_dic.get('''step_conv1''' ) ) SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' ) SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' ) SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' ) SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' ) SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' ) # create model instance SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) # modify model parameter SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' ) SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' ) SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' ) SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' ) SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' ) return conv_ins def lowercase__ ( self : str , _lowercase : Optional[int] ): return 1 / (1 + np.exp(-1 * x )) def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ): return round(_lowercase , 3 ) def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ): # convolution process SCREAMING_SNAKE_CASE__ : Tuple = convs[0] SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1] SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0] # get the data slice of original image data, data_focus SCREAMING_SNAKE_CASE__ : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ): for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ): SCREAMING_SNAKE_CASE__ : 
Optional[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_lowercase ) # calculate the feature map of every single kernel, and saved as list of matrix SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_lowercase ): SCREAMING_SNAKE_CASE__ : int = [] for i_focus in range(len(_lowercase ) ): SCREAMING_SNAKE_CASE__ : Tuple = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape( _lowercase , _lowercase ) data_featuremap.append(_lowercase ) # expanding the data slice to One dimenssion SCREAMING_SNAKE_CASE__ : int = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase ) return focus_list, data_featuremap def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ): # pooling process SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling ) SCREAMING_SNAKE_CASE__ : List[str] = [] for i_map in range(len(_lowercase ) ): SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map] SCREAMING_SNAKE_CASE__ : int = [] for i_focus in range(0 , _lowercase , _lowercase ): for j_focus in range(0 , _lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ : Dict = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_lowercase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_lowercase ) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase ) featuremap_pooled.append(_lowercase ) return 
featuremap_pooled def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ): # expanding three dimension data to one dimension list SCREAMING_SNAKE_CASE__ : Dict = [] for i in range(len(_lowercase ) ): SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] ) SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] ) SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0] data_expanded.extend(_lowercase ) SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase ) return data_expanded def lowercase__ ( self : Tuple , _lowercase : Optional[int] ): # expanding matrix to one dimension list SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase ) SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase ) SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ): SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : Dict = 0 for i_map in range(_lowercase ): SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) ) for i in range(0 , _lowercase , _lowercase ): for j in range(0 , _lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[ i_pool ] SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1 SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply( _lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_lowercase ) return pd_all def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ): # model traning print('''----------------------Start Training-------------------------''' ) print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) ) print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) ) SCREAMING_SNAKE_CASE__ : Any = 0 SCREAMING_SNAKE_CASE__ : Tuple = [] 
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00 while rp < n_repeat and mse >= error_accuracy: SCREAMING_SNAKE_CASE__ : List[Any] = 0 print(f"""-------------Learning Time {rp}--------------""" ) for p in range(len(_lowercase ) ): # print('------------Learning Image: %d--------------'%p) SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] ) SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute( _lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase ) SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- SCREAMING_SNAKE_CASE__ : Tuple = np.multiply( (data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply( np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji ) SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist() SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool( _lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): SCREAMING_SNAKE_CASE__ : 
Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1 SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns all_mse.append(_lowercase ) def draw_error(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_lowercase , '''+-''' ) plt.plot(_lowercase , '''r--''' ) plt.xlabel('''Learning Times''' ) plt.ylabel('''All_mse''' ) plt.grid(_lowercase , alpha=0.5 ) plt.show() print('''------------------Training Complished---------------------''' ) print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def lowercase__ ( self : Union[str, Any] , _lowercase : int ): # model predict SCREAMING_SNAKE_CASE__ : Dict = [] print('''-------------------Start Testing-------------------------''' ) print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) ) for p in range(len(_lowercase ) ): SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute( _lowercase , 
self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase ) SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase ) produce_out.extend(bp_outa.getA().tolist() ) SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out] return np.asarray(_lowercase ) def lowercase__ ( self : Optional[int] , _lowercase : Tuple ): # return the data of image after convoluting process so we can check it out SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute( _lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
35
0
'''simple docstring''' import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def UpperCamelCase_ ( A__ : List[Any] , A__ : Dict=False ): '''simple docstring''' lowerCAmelCase_ : int = OmegaConf.load(__UpperCamelCase ) if display: print(yaml.dump(OmegaConf.to_container(__UpperCamelCase ) ) ) return config def UpperCamelCase_ ( A__ : List[Any] , A__ : Optional[int]=None , A__ : List[str]=None ): '''simple docstring''' if conf_path is None: lowerCAmelCase_ : List[str] = """./model_checkpoints/vqgan_only.yaml""" lowerCAmelCase_ : Tuple = load_config(__UpperCamelCase , display=__UpperCamelCase ) lowerCAmelCase_ : int = VQModel(**config.model.params ) if ckpt_path is None: lowerCAmelCase_ : List[Any] = """./model_checkpoints/vqgan_only.pt""" lowerCAmelCase_ : int = torch.load(__UpperCamelCase , map_location=__UpperCamelCase ) if ".ckpt" in ckpt_path: lowerCAmelCase_ : Optional[int] = sd["""state_dict"""] model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) model.to(__UpperCamelCase ) del sd return model def UpperCamelCase_ ( A__ : Tuple , A__ : str ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = model.encode(__UpperCamelCase ) print(f'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' ) lowerCAmelCase_ : int = model.decode(__UpperCamelCase ) return xrec def UpperCamelCase_ ( A__ : Dict , A__ : List[Any]=False ): '''simple docstring''' lowerCAmelCase_ : Tuple = string.rsplit(""".""" , 1 ) if reload: lowerCAmelCase_ : Tuple = importlib.import_module(__UpperCamelCase ) importlib.reload(__UpperCamelCase ) return getattr(importlib.import_module(__UpperCamelCase , package=__UpperCamelCase ) , cls ) def UpperCamelCase_ ( A__ : Dict ): '''simple docstring''' if "target" not in config: raise KeyError("""Expected key `target` to instantiate.""" ) return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) ) def UpperCamelCase_ ( A__ : List[Any] , A__ : 
Union[str, Any] , A__ : int=True , A__ : str=True ): '''simple docstring''' lowerCAmelCase_ : int = instantiate_from_config(__UpperCamelCase ) if sd is not None: model.load_state_dict(__UpperCamelCase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def UpperCamelCase_ ( A__ : int , A__ : Dict , A__ : Dict , A__ : Tuple ): '''simple docstring''' if ckpt: lowerCAmelCase_ : int = torch.load(__UpperCamelCase , map_location="""cpu""" ) lowerCAmelCase_ : List[str] = pl_sd["""global_step"""] print(f'loaded model from global step {global_step}.' ) else: lowerCAmelCase_ : int = {"""state_dict""": None} lowerCAmelCase_ : Any = None lowerCAmelCase_ : Dict = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=__UpperCamelCase , eval_mode=__UpperCamelCase )["""model"""] return model, global_step
721
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) __A : Dict = { "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"], "processing_speech_to_text": ["Speech2TextProcessor"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = ["Speech2TextTokenizer"] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Dict = ["Speech2TextFeatureExtractor"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFSpeech2TextForConditionalGeneration", "TFSpeech2TextModel", "TFSpeech2TextPreTrainedModel", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", "Speech2TextPreTrainedModel", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys __A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
398
0