code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def snake_case_ ( A_ : Dict, A_ : str, A_ : str, A_ : Path, A_ : str = None, A_ : str = None, A_ : str = None, ):
'''simple docstring'''
if config_name_or_path is None:
_lowerCamelCase : Dict = '''facebook/rag-token-base''' if model_type == '''rag_token''' else '''facebook/rag-sequence-base'''
if generator_tokenizer_name_or_path is None:
_lowerCamelCase : Optional[Any] = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
_lowerCamelCase : Union[str, Any] = question_encoder_name_or_path
_lowerCamelCase : Optional[int] = RagTokenForGeneration if model_type == '''rag_token''' else RagSequenceForGeneration
# Save model.
_lowerCamelCase : Optional[Any] = RagConfig.from_pretrained(A_ )
_lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(A_ )
_lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(A_ )
_lowerCamelCase : Optional[int] = gen_config
_lowerCamelCase : List[str] = question_encoder_config
_lowerCamelCase : Union[str, Any] = model_class.from_pretrained_question_encoder_generator(
A_, A_, config=A_ )
rag_model.save_pretrained(A_ )
# Sanity check.
model_class.from_pretrained(A_ )
# Save tokenizers.
_lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(A_ )
gen_tokenizer.save_pretrained(dest_dir / '''generator_tokenizer/''' )
_lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained(A_ )
question_encoder_tokenizer.save_pretrained(dest_dir / '''question_encoder_tokenizer/''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
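For reference, a minimal sketch of calling `consolidate` directly from Python instead of through the CLI; the checkpoint names below are illustrative placeholders, not values mandated by the script.

from pathlib import Path

# Hypothetical inputs: a BART generator plus a DPR question encoder.
out_dir = Path("rag-consolidated")
out_dir.mkdir(exist_ok=True)
consolidate(
    "rag_sequence",
    "facebook/bart-large",
    "facebook/dpr-question_encoder-single-nq-base",
    out_dir,
)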
| 72 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self :Any , *lowercase_ :str , **lowercase_ :List[Any] ) -> Union[str, Any]:
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(lowercase_ )
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Any=None , lowercase_ :Optional[int]=None , lowercase_ :Tuple=None , **lowercase_ :Tuple ) -> Dict:
UpperCAmelCase , UpperCAmelCase = {}, {}
if padding is not None:
UpperCAmelCase = padding
if truncation is not None:
UpperCAmelCase = truncation
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self :List[Any] , lowercase_ :Union["Image.Image", str] , lowercase_ :str = None , **lowercase_ :Union[str, Any] ) -> Union[str, Any]:
if isinstance(lowercase_ , (Image.Image, str) ) and isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = {'image': image, 'question': question}
else:
UpperCAmelCase = image
UpperCAmelCase = super().__call__(lowercase_ , **lowercase_ )
return results
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :int=False , lowercase_ :Optional[int]=False ) -> Union[str, Any]:
UpperCAmelCase = load_image(inputs['image'] )
UpperCAmelCase = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_ )
UpperCAmelCase = self.image_processor(images=lowercase_ , return_tensors=self.framework )
model_inputs.update(lowercase_ )
return model_inputs
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :List[str] ) -> Any:
UpperCAmelCase = self.model(**lowercase_ )
return model_outputs
def UpperCAmelCase__ ( self :Dict , lowercase_ :Tuple , lowercase_ :List[Any]=5 ) -> Union[str, Any]:
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.sigmoid()[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(lowercase_ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_ )]
| 78 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=A_ ):
__a = ["""torch""", """torchsde"""]
def __init__( self : Tuple , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Union[str, Any] ):
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase ( cls : int , *_lowerCamelCase : int , **_lowerCamelCase : Tuple ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
def lowercase ( cls : List[str] , *_lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
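For context, a sketch of what this dummy pattern buys in an environment without the backends: instantiation fails with an informative error rather than a bare NameError at import time. Illustrative only.

# Illustrative only: with `torchsde` missing, construction raises a helpful error.
try:
    DPMSolverSDEScheduler()
except ImportError as err:
    print(err)  # names the missing `torch` / `torchsde` backends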
| 40 |
"""simple docstring"""
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple ) -> Union[str, Any]:
# Return True if there is node that has not iterated.
_snake_case = [False] * len(__lowerCamelCase )
_snake_case = []
queue.append(__lowerCamelCase )
_snake_case = True
while queue:
_snake_case = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
_snake_case = True
_snake_case = u
return visited[t]
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict ) -> Dict:
# This array is filled by BFS and to store path
_snake_case = [-1] * (len(__lowerCamelCase ))
_snake_case = 0
while bfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
_snake_case = float('''Inf''' )
_snake_case = sink
while s != source:
# Find the minimum value in select path
_snake_case = min(__lowerCamelCase , graph[parent[s]][s] )
_snake_case = parent[s]
max_flow += path_flow
_snake_case = sink
while v != source:
_snake_case = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case = parent[v]
return max_flow
UpperCAmelCase__ = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
UpperCAmelCase__ , UpperCAmelCase__ = 0, 5
print(ford_fulkerson(graph, source, sink))
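# The capacity matrix above is the classic CLRS flow-network example;
# the expected printed maximum flow is 23.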
| 40 | 1 |
import math


def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
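# Worked check: for n = 10, the square of the sum is 55**2 = 3025 and the sum
# of the squares is 385, so solution(10) == 2640.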
| 192 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class NllbTokenizer(PreTrainedTokenizer):
    """NLLB tokenizer backed by SentencePiece, with fairseq-style language-code handling."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
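A brief usage sketch for the tokenizer above, assuming the distilled 600M NLLB checkpoint; the sentence is arbitrary.

from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
# In the default (non-legacy) mode the source language code is prepended and
# </s> appended, per set_src_lang_special_tokens above.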
| 192 | 1 |
"""Dummy placeholder objects used when `transformers`, `torch` and `note_seq` are unavailable."""
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 354 |
"""PyTorch MobileNetV1 model."""
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to the matching PyTorch parameters."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)


class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution ...
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # ... followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
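A short usage sketch, assuming the `google/mobilenet_v1_1.0_224` checkpoint referenced in the docstrings above and an already-loaded PIL image; illustrative only.

from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL.Image
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])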
| 294 | 0 |
"""simple docstring"""
import os
__SCREAMING_SNAKE_CASE ={'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase_ : Tuple = 0
lowercase_ : Dict = 0
while index < len(_snake_case ) - 1:
lowercase_ : List[Any] = SYMBOLS[numerals[index]]
lowercase_ : str = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase_ : Union[str, Any] = """"""
lowercase_ : str = num // 10_00
numerals += m_count * "M"
num %= 10_00
lowercase_ : str = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
lowercase_ : int = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
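# Round-trip example: parse_roman_numerals("XIIIIII") == 16, and
# generate_roman_numerals(16) == "XVI", a saving of 4 characters.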
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Sum the characters saved by minimising every numeral in the input file."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 213 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 0 |
"""simple docstring"""
def snake_case ( A__ ):
UpperCAmelCase_ : Tuple = current_set.copy()
for row_index, row in enumerate(A__ ):
UpperCAmelCase_ : List[Any] = row[0]
for column_index, column in enumerate(A__ ):
if magnitude == 0:
UpperCAmelCase_ : str = column
continue
UpperCAmelCase_ : Union[str, Any] = column / magnitude
# Subtract to cancel term
UpperCAmelCase_ : str = current_set[0]
UpperCAmelCase_ : int = [first_row]
UpperCAmelCase_ : Optional[Any] = current_set[1::]
for row in current_set:
UpperCAmelCase_ : List[str] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(A__ )
continue
for column_index in range(len(A__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(A__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
UpperCAmelCase_ : Dict = final_set[0]
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Tuple = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
UpperCAmelCase_ : List[str] = simplify(A__ )
for i in range(len(A__ ) ):
resultant[i].insert(0 ,current_first_column[i] )
resultant.insert(0 ,A__ )
UpperCAmelCase_ : int = resultant
return final_set
def snake_case ( A__ ):
if len(A__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
UpperCAmelCase_ : str = len(A__ ) + 1
if any(len(A__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(A__ ,(int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(A__ ) == 1:
return [equations[0][-1] / equations[0][0]]
UpperCAmelCase_ : List[Any] = equations.copy()
if any(0 in row for row in data_set ):
UpperCAmelCase_ : Any = data_set.copy()
UpperCAmelCase_ : Optional[Any] = []
for row_index, row in enumerate(A__ ):
if 0 not in row:
UpperCAmelCase_ : Tuple = data_set.pop(A__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 ,A__ )
UpperCAmelCase_ : Optional[Any] = data_set.copy()
UpperCAmelCase_ : Union[str, Any] = simplify(A__ )
UpperCAmelCase_ : Tuple = simplified[::-1]
UpperCAmelCase_ : list = []
for row in simplified:
UpperCAmelCase_ : Optional[int] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
UpperCAmelCase_ : List[Any] = row.copy()[: len(A__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(A__ ) == 0:
solutions.append(0 )
continue
UpperCAmelCase_ : Dict = temp_row[1::]
UpperCAmelCase_ : List[str] = temp_row[::-1]
for column_index, column in enumerate(A__ ):
current_solution -= column * solutions[column_index]
solutions.append(A__ )
UpperCAmelCase_ : Optional[Any] = []
for item in solutions:
final.append(float(round(A__ ,5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
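# Expected output: the 5x5 system above solves to [-1.0, 0.0, 1.0, 2.0, 3.0],
# and [[4, 2]] encodes 4x = 2, i.e. [0.5].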
| 253 |
"""simple docstring"""
def snake_case ( A__ = 10_00 ):
UpperCAmelCase_ : Optional[Any] = 2**power
UpperCAmelCase_ : Optional[int] = str(A__ )
UpperCAmelCase_ : Tuple = list(A__ )
UpperCAmelCase_ : Any = 0
for i in list_num:
sum_of_num += int(A__ )
return sum_of_num
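# Example: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.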
if __name__ == "__main__":
lowerCamelCase_ = int(input('''Enter the power of 2: ''').strip())
print('''2 ^ ''', power, ''' = ''', 2**power)
lowerCamelCase_ = solution(power)
print('''Sum of the digits is: ''', result)
| 253 | 1 |
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
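A minimal sketch of the two call modes these tests exercise: waveform inputs versus log-mel targets. The random waveform is a placeholder.

import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # one second of fake 16 kHz audio
inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")  # normalized waveform
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="pt")  # 80-bin log-mel frames
print(inputs.input_values.shape, targets.input_values.shape)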
| 176 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call).' )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.tokenizer
yield
SCREAMING_SNAKE_CASE__ = self.feature_extractor
SCREAMING_SNAKE_CASE__ = False
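# A minimal usage sketch for a processor of this shape, using the public
# transformers.Wav2Vec2Processor API that this class mirrors. The checkpoint id
# is a real one, but the silent waveform is illustrative only.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silent 16 kHz audio

# One call covers both modalities: `audio` is routed to the feature extractor
# and `text` to the tokenizer, whose token ids come back under the `labels` key.
batch = processor(audio=audio, text="HELLO WORLD", sampling_rate=16_000, return_tensors="np")
print(batch["input_values"].shape)  # (1, 16000)
print(batch["labels"])              # CTC token ids for "HELLO WORLD"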
| 176 | 1 |
def snake_case_(_UpperCamelCase = 1_000_000 ) -> int:
"""simple docstring"""
_snake_case = 1
_snake_case = 1
_snake_case = {1: 1}
for inputa in range(2 , _UpperCamelCase ):
_snake_case = 0
_snake_case = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_snake_case = (3 * number) + 1
counter += 1
if inputa not in counters:
_snake_case = counter
if counter > pre_counter:
_snake_case = inputa
_snake_case = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
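# A clean, self-contained restatement of the memoised idea the search above
# implements, for readers who want to sanity-check it: cache each Collatz chain
# length so later starting values reuse earlier work. Names are illustrative.
from functools import lru_cache

@lru_cache(maxsize=None)
def collatz_chain_length(start: int) -> int:
    if start == 1:
        return 1
    nxt = start // 2 if start % 2 == 0 else 3 * start + 1
    return 1 + collatz_chain_length(nxt)

# Among starting values below 10, the longest chain begins at 9
# (9 -> 28 -> 14 -> 7 -> ... -> 1, twenty terms), so a search capped at 10 returns 9.
assert max(range(1, 10), key=collatz_chain_length) == 9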
| 364 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
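# A simplified sketch of the lazy-import machinery wired up above, assuming the
# behaviour (not the exact code) of transformers.utils._LazyModule: attribute
# access triggers the real submodule import, so importing the package stays
# cheap and torch is only loaded when a model class is first touched.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the next access is a plain lookup
        return value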
| 278 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCamelCase : List[str] = '''true'''
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=16 ):
set_seed(42 )
lowerCAmelCase__ : Union[str, Any] = RegressionModel()
lowerCAmelCase__ : Optional[int] = deepcopy(A_ )
lowerCAmelCase__ : Any = RegressionDataset(length=A_ )
lowerCAmelCase__ : List[str] = DataLoader(A_ , batch_size=A_ )
model.to(accelerator.device )
lowerCAmelCase__ ,lowerCAmelCase__ : Dict = accelerator.prepare(A_ , A_ )
return model, ddp_model, dataloader
def __SCREAMING_SNAKE_CASE ( A_ , A_=False ):
lowerCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
lowerCAmelCase__ : List[str] = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(A_ ):
lowerCAmelCase__ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A_ , max_length=A_ )
return outputs
with accelerator.main_process_first():
lowerCAmelCase__ : Dict = dataset.map(
A_ , batched=A_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
lowerCAmelCase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A_ ):
if use_longest:
return tokenizer.pad(A_ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(A_ , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return DataLoader(A_ , shuffle=A_ , collate_fn=A_ , batch_size=16 )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = Accelerator(dispatch_batches=A_ , split_batches=A_ )
lowerCAmelCase__ : str = get_dataloader(A_ , not dispatch_batches )
lowerCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = accelerator.prepare(A_ , A_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ):
lowerCAmelCase__ : Union[str, Any] = []
for batch in dataloader:
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = batch.values()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCAmelCase__ ,lowerCAmelCase__ : int = [], []
for logit, targ in logits_and_targets:
logits.append(A_ )
targs.append(A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = torch.cat(A_ ), torch.cat(A_ )
return logits, targs
def __SCREAMING_SNAKE_CASE ( A_ , A_=82 , A_=False , A_=False , A_=16 ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = get_basic_setup(A_ , A_ , A_ )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = generate_predictions(A_ , A_ , A_ )
assert (
len(A_ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(A_ )}'
def __SCREAMING_SNAKE_CASE ( A_ = False , A_ = False ):
lowerCAmelCase__ : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = get_mrpc_setup(A_ , A_ )
# First do baseline
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = setup['''no''']
model.to(A_ )
model.eval()
for batch in dataloader:
batch.to(A_ )
with torch.inference_mode():
lowerCAmelCase__ : Optional[int] = model(**A_ )
lowerCAmelCase__ : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=A_ , references=batch['''labels'''] )
lowerCAmelCase__ : Dict = metric.compute()
# Then do distributed
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCAmelCase__ : Union[str, Any] = model(**A_ )
lowerCAmelCase__ : int = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : int = batch['''labels''']
lowerCAmelCase__ ,lowerCAmelCase__ : int = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=A_ , references=A_ )
lowerCAmelCase__ : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(A_ , A_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCAmelCase__ : Optional[Any] = Accelerator(split_batches=A_ , dispatch_batches=A_ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(A_ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
lowerCAmelCase__ : List[str] = Accelerator()
test_torch_metrics(A_ , 5_12 )
accelerator.state._reset_state()
def __SCREAMING_SNAKE_CASE ( A_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
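# A toy, dependency-free simulation of the duplicate-trimming that
# gather_for_metrics performs and that the assertions above rely on. The shard
# ordering and names here are illustrative only, not Accelerate's real code.
def gather_for_metrics_sim(per_process_batches, dataset_len):
    # Interleave the shards step by step (the order batches were produced in),
    # then drop the padding the sampler appended to make the dataset divisible.
    gathered = [
        item
        for step in zip(*per_process_batches)
        for shard in step
        for item in shard
    ]
    return gathered[:dataset_len]

# 5 samples on 2 processes with batch size 2: sample 0 pads the second rank,
# so a plain all-gather would count it twice; trimming restores 5 predictions.
proc0 = [[0, 1], [4]]
proc1 = [[2, 3], [0]]
assert gather_for_metrics_sim([proc0, proc1], dataset_len=5) == [0, 1, 2, 3, 4]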
| 106 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__UpperCamelCase : Tuple = TypeVar('''T''')
class SCREAMING_SNAKE_CASE ( Generic[T] ):
"""simple docstring"""
lowercase__ = 42 # Cache store of keys
lowercase__ = 42 # References of the keys in cache
lowercase__ = 10 # Maximum capacity of cache
def __init__( self : Dict ,lowercase_ : int ):
lowerCAmelCase__ : str = deque()
lowerCAmelCase__ : Any = set()
if not n:
lowerCAmelCase__ : Optional[Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowerCAmelCase__ : int = n
def __lowerCAmelCase ( self : str ,lowercase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowerCAmelCase__ : Any = self.dq_store.pop()
self.key_reference.remove(lowercase_ )
else:
self.dq_store.remove(lowercase_ )
self.dq_store.appendleft(lowercase_ )
self.key_reference.add(lowercase_ )
def __lowerCAmelCase ( self : int ):
for k in self.dq_store:
print(lowercase_ )
def __repr__( self : Tuple ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
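# For contrast, a compact sketch of the same reference policy built on
# collections.OrderedDict, which folds the deque and the membership set above
# into one structure. The final ordering matches the assertion above.
from collections import OrderedDict


class OrderedDictLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self._store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self._store:
            self._store.move_to_end(key)  # most recently used goes to the end
        else:
            if len(self._store) >= self.capacity:
                self._store.popitem(last=False)  # evict the least recently used
            self._store[key] = None


cache = OrderedDictLRU(4)
for key in ["A", 2, 3, "A", 4, 5]:
    cache.refer(key)
assert list(reversed(cache._store)) == [5, 4, "A", 3]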
| 106 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _lowerCamelCase :
def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=7 , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=False , UpperCamelCase=True , UpperCamelCase=99 , UpperCamelCase=32 , UpperCamelCase=5 , UpperCamelCase=4 , UpperCamelCase=37 , UpperCamelCase="gelu" , UpperCamelCase=0.1 , UpperCamelCase=0.1 , UpperCamelCase=5_12 , UpperCamelCase=16 , UpperCamelCase=2 , UpperCamelCase=0.02 , UpperCamelCase=3 , UpperCamelCase=4 , UpperCamelCase=None , ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : List[str] = seq_length
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : Optional[int] = use_input_mask
lowerCAmelCase__ : List[str] = use_token_type_ids
lowerCAmelCase__ : Optional[int] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Tuple = hidden_size
lowerCAmelCase__ : List[str] = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : str = hidden_dropout_prob
lowerCAmelCase__ : Tuple = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : Any = type_vocab_size
lowerCAmelCase__ : str = type_sequence_label_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : str = num_choices
lowerCAmelCase__ : List[Any] = scope
def _lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : List[Any] = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : List[str] = None
if self.use_labels:
lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
def _lowerCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = FalconModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowerCAmelCase__ : List[str] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Optional[Any] = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : Tuple = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase__ : Tuple = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase__ : Tuple = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Any = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : int = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : str = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase__ : Optional[Any] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowerCAmelCase__ : Any = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
lowerCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase__ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowerCAmelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase__ : Optional[int] = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["""hidden_states"""][0]
lowerCAmelCase__ : str = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )["""hidden_states"""][0]
# select random slice
lowerCAmelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase__ : str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
def _lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
_lowerCamelCase :Tuple = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase :Dict = (FalconForCausalLM,) if is_torch_available() else ()
_lowerCamelCase :int = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase :int = False
_lowerCamelCase :Union[str, Any] = False
def _lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = FalconModelTester(self )
lowerCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 )
def _lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ , *lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase__ : Union[str, Any] = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : List[str] = input_dict["""input_ids"""]
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : Dict = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[Any] = 3
lowerCAmelCase__ : List[Any] = """single_label_classification"""
lowerCAmelCase__ : Dict = input_dict["""input_ids"""]
lowerCAmelCase__ : int = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase__ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase__ : int = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : List[Any] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> int:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = input_dict["""input_ids"""]
lowerCAmelCase__ : Union[str, Any] = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : Optional[int] = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase__ : int = input_ids.shape[0]
lowerCAmelCase__ : List[Any] = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase__ : Optional[int] = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowerCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[str] = 3
lowerCAmelCase__ : str = """multi_label_classification"""
lowerCAmelCase__ : List[Any] = input_dict["""input_ids"""]
lowerCAmelCase__ : Tuple = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase__ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase__ : List[Any] = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase__ : Any = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , """use_cache""" ):
return
lowerCAmelCase__ : Optional[int] = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[str] = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase__ : Optional[Any] = (
getattr(UpperCAmelCase__ , """decoder_layers""" , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , """num_decoder_layers""" , UpperCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase__ : Optional[int] = getattr(UpperCAmelCase__ , """num_kv_heads""" , config.num_attention_heads )
lowerCAmelCase__ : Tuple = getattr(UpperCAmelCase__ , """d_model""" , config.hidden_size )
lowerCAmelCase__ : Any = embed_dim // num_attention_heads
lowerCAmelCase__ : Tuple = outputs["""past_key_values"""]
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ : int = inputs["""input_ids"""].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase__ : Tuple = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase__ : Optional[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
@slow
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
lowerCAmelCase__ : List[Any] = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase__ : Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
lowerCAmelCase__ : List[str] = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=19 )
lowerCAmelCase__ : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller sizes and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase__ : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def _lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller sizes and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase__ : Union[str, Any] = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase__ : Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase__ : Optional[int] = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
lowerCAmelCase__ : Any = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=20 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
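# A back-of-the-envelope restatement of the cache bookkeeping checked by the
# past-key-values test above: each layer caches a (key, value) pair of shape
# (batch, kv_heads, seq_len, head_dim). Multi-query attention collapses
# kv_heads to 1, while the new decoder architecture keeps the full head count.
# The falcon-7b-style numbers below are illustrative.
def expected_falcon_cache_shape(batch, seq_len, hidden_size, num_heads,
                                multi_query, new_decoder_architecture):
    head_dim = hidden_size // num_heads
    if new_decoder_architecture:
        kv_heads = num_heads
    elif multi_query:
        kv_heads = 1
    else:
        kv_heads = num_heads
    return (batch, kv_heads, seq_len, head_dim)

# MQA shrinks the per-layer cache by a factor of num_heads.
assert expected_falcon_cache_shape(1, 128, 4544, 71, True, False) == (1, 1, 128, 64)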
| 361 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Dict ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase__ : Union[str, Any] = Vector()
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase ) , """(0,0,0,0,0,1)""" )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase ) , 4 )
def _lowerCAmelCase ( self : List[str] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Vector([1, 2] )
lowerCAmelCase__ : Optional[int] = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase__ : Union[str, Any] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase__ : List[Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : int = Vector([1, 2, 3] )
lowerCAmelCase__ : Optional[Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Vector([1, 2, 3] )
lowerCAmelCase__ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Any = Vector([1, 2, 3] )
lowerCAmelCase__ : Any = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase__ : Any = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
lowerCAmelCase__ : Optional[Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase , UpperCamelCase ) ) , """(3,4,7)""" )
def _lowerCAmelCase ( self : Optional[int] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase__ : Any = x.copy()
self.assertEqual(str(UpperCamelCase ) , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase ) , """(0,1,0)""" )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : Dict = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase , UpperCamelCase ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : int = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase , UpperCamelCase ) )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase__ : Tuple = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _lowerCAmelCase ( self : str ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCamelCase ) )
def _lowerCAmelCase ( self : Tuple ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def _lowerCAmelCase ( self : Any ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _lowerCAmelCase ( self : List[Any] ) -> None:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase__ : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _lowerCAmelCase ( self : Union[str, Any] ) -> None:
"""simple docstring"""
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
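# A standalone cross-check of the cofactor relations the tests above exercise:
# the determinant equals the cofactor expansion along any row. Plain nested
# lists stand in for the library's Matrix class so the example is independent.
def det3(m):
    return (
        m[0][0] * (m[1][1] * m[2][2] - m[1][2] * m[2][1])
        - m[0][1] * (m[1][0] * m[2][2] - m[1][2] * m[2][0])
        + m[0][2] * (m[1][0] * m[2][1] - m[1][1] * m[2][0])
    )

a = [[1, 2, 3], [2, 4, 5], [6, 7, 8]]
cofactor_row0 = [-3, 14, -10]  # first row of the `cofactors` fixture above
assert det3(a) == sum(a[0][j] * cofactor_row0[j] for j in range(3)) == -5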
| 212 | 0 |
import requests
from bsa import BeautifulSoup
def UpperCAmelCase_ ( __snake_case = "https://www.worldometers.info/coronavirus" ) -> dict:
"""simple docstring"""
_lowercase =BeautifulSoup(requests.get(__snake_case ).text , '''html.parser''' )
_lowercase =soup.findAll('''h1''' )
_lowercase =soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(__snake_case , __snake_case )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
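# The zip-of-selectors pattern above silently truncates to the shorter list if
# the page ever ships a different number of headings than counters. A defensive
# variant of the pairing step (purely a sketch; the live page markup can change)
# fails loudly instead:
def paired_stats(keys, values):
    if len(keys) != len(values):
        raise ValueError(f"selector mismatch: {len(keys)} keys vs {len(values)} values")
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}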
| 5 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowerCamelCase__ ( lowerCAmelCase):
SCREAMING_SNAKE_CASE__ = '''esm'''
def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=7_6_8 , UpperCAmelCase=1_2 , UpperCAmelCase=1_2 , UpperCAmelCase=3_0_7_2 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1_0_2_6 , UpperCAmelCase=0.02 , UpperCAmelCase=1e-12 , UpperCAmelCase="absolute" , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase , mask_token_id=UpperCAmelCase , **UpperCAmelCase )
_lowercase =vocab_size
_lowercase =hidden_size
_lowercase =num_hidden_layers
_lowercase =num_attention_heads
_lowercase =intermediate_size
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =max_position_embeddings
_lowercase =initializer_range
_lowercase =layer_norm_eps
_lowercase =position_embedding_type
_lowercase =use_cache
_lowercase =emb_layer_norm_before
_lowercase =token_dropout
_lowercase =is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
_lowercase =EsmFoldConfig()
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase =EsmFoldConfig(**UpperCAmelCase )
_lowercase =esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
_lowercase =get_default_vocab_list()
else:
_lowercase =vocab_list
else:
_lowercase =None
_lowercase =None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , UpperCAmelCase ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def __A (self ) -> List[str]:
_lowercase =super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase ):
_lowercase =self.esmfold_config.to_dict()
return output
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = None
def __A (self ) -> Union[str, Any]:
if self.trunk is None:
_lowercase =TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase ):
_lowercase =TrunkConfig(**self.trunk )
def __A (self ) -> Tuple:
_lowercase =asdict(self )
_lowercase =self.trunk.to_dict()
return output
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE__ = 48
SCREAMING_SNAKE_CASE__ = 1024
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = None
def __A (self ) -> List[str]:
if self.structure_module is None:
_lowercase =StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase ):
_lowercase =StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
'''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
f" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
'''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
_lowercase =self.sequence_state_dim // self.sequence_head_width
_lowercase =self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def __A (self ) -> Dict:
_lowercase =asdict(self )
_lowercase =self.structure_module.to_dict()
return output
@dataclass
class lowerCamelCase__ :
SCREAMING_SNAKE_CASE__ = 384
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 16
SCREAMING_SNAKE_CASE__ = 128
SCREAMING_SNAKE_CASE__ = 12
SCREAMING_SNAKE_CASE__ = 4
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 0.1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 1E-8
SCREAMING_SNAKE_CASE__ = 1E5
def __A (self ) -> List[Any]:
return asdict(self )
def UpperCAmelCase_ ( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
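# A minimal usage sketch, assuming this file mirrors transformers' public
# EsmConfig API: with is_folding_model left False, the folding sub-configs stay
# None and are only default-filled when folding is enabled. Values are
# illustrative.
from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6,
                   num_attention_heads=20, is_folding_model=False)
assert config.model_type == "esm"
assert config.esmfold_config is None and config.vocab_list is None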
| 5 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase ( lowercase : int ) -> int:
"""simple docstring"""
if number != int(lowercase ):
raise ValueError("the value of input must be a natural number" )
if number < 0:
raise ValueError("the value of input must not be a negative number" )
if number == 0:
return 1
snake_case : Optional[Any] = [-1] * (number + 1)
snake_case : str = 0
for i in range(1 , number + 1 ):
snake_case : List[Any] = sys.maxsize
snake_case : Union[str, Any] = int(math.sqrt(lowercase ) )
for j in range(1 , root + 1 ):
snake_case : List[str] = 1 + answers[i - (j**2)]
snake_case : Optional[Any] = min(lowercase , lowercase )
snake_case : Any = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
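# A compact, self-contained restatement of the recurrence the function above
# memoises: answers[i] = 1 + min(answers[i - j*j]) over all squares j*j <= i.
# Names are illustrative; the logic matches the bottom-up version above.
import math

def min_squares(n: int) -> int:
    answers = [0] * (n + 1)
    for i in range(1, n + 1):
        answers[i] = 1 + min(answers[i - j * j] for j in range(1, math.isqrt(i) + 1))
    return answers[n]

# Lagrange's four-square theorem caps every answer at 4.
assert min_squares(12) == 3  # 4 + 4 + 4
assert min_squares(13) == 2  # 4 + 9
assert min_squares(7) == 4   # 4 + 1 + 1 + 1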
| 112 | 0 |
"""simple docstring"""
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__ ( lowercase_ ):
def __init__( self : Any , _lowerCamelCase : Optional[Any] ):
_snake_case = data
def __iter__( self : List[str] ):
for element in self.data:
yield element
def _UpperCAmelCase ( __lowerCamelCase : int=True ) -> List[Any]:
_snake_case = Accelerator(even_batches=__lowerCamelCase )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple = False ) -> Dict:
if iterable:
_snake_case = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
else:
_snake_case = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
_snake_case = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
_snake_case = accelerator.prepare(__lowerCamelCase )
return dl
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , ) -> Union[str, Any]:
_snake_case = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
_snake_case = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _UpperCAmelCase ( ) -> Tuple:
_snake_case = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _UpperCAmelCase ( ) -> Optional[int]:
_snake_case = create_accelerator(even_batches=__lowerCamelCase )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _UpperCAmelCase ( ) -> List[str]:
_snake_case = create_accelerator(even_batches=__lowerCamelCase )
_snake_case = torch.nn.Linear(1 , 1 )
_snake_case = accelerator.prepare(__lowerCamelCase )
_snake_case = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
_snake_case = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__lowerCamelCase ):
_snake_case = ddp_model(batch[0].float() )
_snake_case = output.sum()
loss.backward()
batch_idxs.append(__lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _UpperCAmelCase ( __lowerCamelCase : List[Any] ) -> List[Any]:
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for multi-GPU" in str(w[-1].message )
def _UpperCAmelCase ( ) -> Optional[Any]:
_snake_case = True
_snake_case = False
_snake_case = create_accelerator(even_batches=__lowerCamelCase )
_snake_case = torch.nn.Linear(1 , 1 )
_snake_case = accelerator.prepare(__lowerCamelCase )
_snake_case = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
_snake_case = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
_snake_case = train_dl.batch_sampler.even_batches
_snake_case = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _UpperCAmelCase ( ) -> Any:
_snake_case = True
_snake_case = False
_snake_case = create_accelerator(even_batches=__lowerCamelCase )
_snake_case = torch.nn.Linear(1 , 1 )
_snake_case = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
_snake_case = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('''ignore''' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
_snake_case = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _UpperCAmelCase ( ) -> Any:
_snake_case = create_accelerator()
_snake_case = torch.nn.Linear(1 , 1 )
_snake_case = accelerator.prepare(__lowerCamelCase )
create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
with warnings.catch_warnings(record=__lowerCamelCase ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
pass
assert issubclass(w[-1].category , __lowerCamelCase )
assert "only supported for map-style datasets" in str(w[-1].message )
def _UpperCAmelCase ( ) -> Tuple:
_snake_case = create_accelerator()
accelerator.print('''Test that even_batches variable ensures uniform batches across processes''' )
test_default_ensures_even_batch_sizes()
accelerator.print('''Run tests with even_batches disabled''' )
test_can_disable_even_batches()
accelerator.print('''Test joining uneven inputs''' )
test_can_join_uneven_inputs()
accelerator.print('''Test overriding even_batches when joining uneven inputs''' )
test_join_can_override_even_batches()
accelerator.print('''Test overriding even_batches for mixed dataloader types''' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('''Test overriding even_batches raises a warning for iterable dataloaders''' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('''Test join with non DDP distributed raises warning''' )
_snake_case = accelerator.state.distributed_type
_snake_case = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
_snake_case = original_state
if __name__ == "__main__":
main()
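# A toy model of the sharding behaviour the script above tests, with no
# Accelerate dependency. Round-robin assignment and repeat-to-pad are
# illustrative simplifications of what the real batch samplers do.
def shard_samples(num_samples: int, num_procs: int, even_batches: bool):
    shards = [list(range(proc, num_samples, num_procs)) for proc in range(num_procs)]
    if even_batches:
        longest = max(len(shard) for shard in shards)
        for shard in shards:
            while len(shard) < longest:
                shard.append(shard[0])  # repeat an earlier sample to pad
    return shards

# 3 samples on 2 processes: padding duplicates a sample so every rank sees the
# same number of batches; disabling it leaves rank 1 short, which is why DDP
# steps must then be wrapped in accelerator.join_uneven_inputs.
assert shard_samples(3, 2, even_batches=True) == [[0, 2], [1, 1]]
assert shard_samples(3, 2, even_batches=False) == [[0, 2], [1]]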
| 288 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _UpperCAmelCase :
UpperCamelCase = PegasusConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self :Union[str, Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :str=13 , __UpperCamelCase :List[Any]=7 , __UpperCamelCase :Union[str, Any]=True , __UpperCamelCase :List[Any]=False , __UpperCamelCase :Any=99 , __UpperCamelCase :Tuple=32 , __UpperCamelCase :Optional[int]=2 , __UpperCamelCase :Optional[Any]=4 , __UpperCamelCase :Tuple=37 , __UpperCamelCase :Optional[Any]=0.1 , __UpperCamelCase :Tuple=0.1 , __UpperCamelCase :Optional[int]=40 , __UpperCamelCase :Tuple=2 , __UpperCamelCase :Dict=1 , __UpperCamelCase :Any=0 , ):
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = eos_token_id
A = pad_token_id
A = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None,):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 292 | 0 |
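For reference, a hedged standalone sketch of the generation path the integration test drives, assuming TF, sentencepiece, and access to the google/pegasus-xsum checkpoint:

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tokenizer(
    ["PG&E scheduled the blackouts in response to forecasts for high winds."],
    padding=True, return_tensors="tf",
)
# num_beams=2 mirrors the test's generate() call
generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tokenizer.batch_decode(generated.numpy(), skip_special_tokens=True))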
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class _snake_case :
'''simple docstring'''
def __init__( self: Dict ) -> Any:
UpperCAmelCase_ : Optional[Any] = """"""
UpperCAmelCase_ : Tuple = """"""
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : str = 256
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = 0
def A__ ( self: Dict ,lowerCamelCase_: Dict ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = cva.imread(lowerCamelCase_ ,0 )
UpperCAmelCase_ : List[str] = copy.deepcopy(self.img )
UpperCAmelCase_ : Union[str, Any] = plt.hist(self.img.ravel() ,256 ,[0, 256] ,label="""x""" )
UpperCAmelCase_ : Optional[Any] = np.sum(lowerCamelCase_ )
for i in range(len(lowerCamelCase_ ) ):
UpperCAmelCase_ : Optional[int] = x[i] / self.k
self.sk += prk
UpperCAmelCase_ : List[str] = (self.L - 1) * self.sk
if self.rem != 0:
UpperCAmelCase_ : Optional[int] = int(last % last )
UpperCAmelCase_ : Optional[Any] = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
UpperCAmelCase_ : Optional[Any] = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
UpperCAmelCase_ : List[str] = self.img[j][i]
if num != self.last_list[num]:
UpperCAmelCase_ : Union[str, Any] = self.last_list[num]
cva.imwrite("""output_data/output.jpg""" ,self.img )
def A__ ( self: int ) -> Optional[int]:
plt.hist(self.img.ravel() ,256 ,[0, 256] )
def A__ ( self: Any ) -> Optional[int]:
cva.imshow("""Output-Image""" ,self.img )
cva.imshow("""Input-Image""" ,self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 350 |
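For comparison, a compact NumPy-only sketch of the same histogram-equalization idea, assuming a uint8 grayscale array; it uses straightforward CDF rounding rather than the class's incremental scheme:

import numpy as np

def equalize(img: np.ndarray) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=256)   # per-level counts
    cdf = hist.cumsum() / hist.sum()                 # cumulative distribution
    lut = np.round(255 * cdf).astype(np.uint8)       # level -> stretched level
    return lut[img]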
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 59 | 0 |
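A small sketch of the gather_for_metrics behaviour the script validates: samples duplicated to make the last batch divisible across processes are dropped during the gather, so metric counts match the dataset. Shapes are illustrative:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.arange(4, device=accelerator.device)   # this process's predictions
refs = torch.arange(4, device=accelerator.device)
preds, refs = accelerator.gather_for_metrics((preds, refs))
if accelerator.is_main_process:
    print(preds.shape)  # total real sample count, padding removed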
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__snake_case , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=__snake_case , type=__snake_case , required=__snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=__snake_case , default='''''' )
parser.add_argument('''--eval_dataset''' , type=__snake_case , default='''''' )
parser.add_argument('''--seed''' , type=__snake_case , default=42 )
parser.add_argument('''--num_train_epochs''' , type=__snake_case , default=3 )
parser.add_argument('''--train_batch_size''' , type=__snake_case , default=8 )
parser.add_argument('''--eval_batch_size''' , type=__snake_case , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__snake_case , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=__snake_case , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=__snake_case , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__snake_case , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=__snake_case , default=6.25e-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=__snake_case , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=__snake_case , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=__snake_case , default=0.0_1 )
parser.add_argument('''--lm_coef''' , type=__snake_case , default=0.9 )
parser.add_argument('''--n_valid''' , type=__snake_case , default=374 )
parser.add_argument('''--server_ip''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=__snake_case , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__snake_case )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info('''Encoding dataset...''' )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
lowerCamelCase_ =args.max_steps
lowerCamelCase_ =args.max_steps // (len(__snake_case ) // args.gradient_accumulation_steps) + 1
else:
lowerCamelCase_ =len(__snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs
lowerCamelCase_ =list(model.named_parameters() )
lowerCamelCase_ =['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
lowerCamelCase_ =[
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
lowerCamelCase_ =AdamW(__snake_case , lr=args.learning_rate , eps=args.adam_epsilon )
lowerCamelCase_ =get_linear_schedule_with_warmup(
__snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=__snake_case )
if args.do_train:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
lowerCamelCase_ =0
lowerCamelCase_ =0
lowerCamelCase_ =tqdm(__snake_case , desc='''Training''' )
for step, batch in enumerate(__snake_case ):
lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch
lowerCamelCase_ =model(__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
lowerCamelCase_ =args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
lowerCamelCase_ =(
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
lowerCamelCase_ ='''Training loss: {:.2e} lr: {:.2e}'''.format(__snake_case , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
lowerCamelCase_ =model.module if hasattr(__snake_case , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
lowerCamelCase_ =os.path.join(args.output_dir , __snake_case )
lowerCamelCase_ =os.path.join(args.output_dir , __snake_case )
torch.save(model_to_save.state_dict() , __snake_case )
model_to_save.config.to_json_file(__snake_case )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
lowerCamelCase_ =OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
lowerCamelCase_ =OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(__snake_case )
if args.do_eval:
model.eval()
lowerCamelCase_, lowerCamelCase_ =0, 0
lowerCamelCase_, lowerCamelCase_ =0, 0
for batch in tqdm(__snake_case , desc='''Evaluating''' ):
lowerCamelCase_ =tuple(t.to(__snake_case ) for t in batch )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =batch
with torch.no_grad():
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =model(
__snake_case , mc_token_ids=__snake_case , lm_labels=__snake_case , mc_labels=__snake_case )
lowerCamelCase_ =mc_logits.detach().cpu().numpy()
lowerCamelCase_ =mc_labels.to('''cpu''' ).numpy()
lowerCamelCase_ =accuracy(__snake_case , __snake_case )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
lowerCamelCase_ =eval_loss / nb_eval_steps
lowerCamelCase_ =eval_accuracy / nb_eval_examples
lowerCamelCase_ =tr_loss / nb_tr_steps if args.do_train else None
lowerCamelCase_ ={'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
lowerCamelCase_ =os.path.join(args.output_dir , '''eval_results.txt''' )
with open(__snake_case , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , __snake_case , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 75 |
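A toy illustration of the (batch, n_choices, seq_len) layout that pre_process_datasets builds: each story is paired with both continuations, and the classification head later reads the hidden state at mc_token_ids. The special-token ids below are assumptions (OpenAI GPT appends added tokens after its 40478-entry vocab):

import numpy as np

start, delim, clf = 40478, 40479, 40480             # assumed ids for the added tokens
story, cont1, cont2 = [10, 11], [20], [21]          # hypothetical token ids
row0 = [start] + story + [delim] + cont1 + [clf]
row1 = [start] + story + [delim] + cont2 + [clf]
input_ids = np.zeros((1, 2, 8), dtype=np.int64)
input_ids[0, 0, : len(row0)] = row0
input_ids[0, 1, : len(row1)] = row1
mc_token_ids = np.array([[len(row0) - 1, len(row1) - 1]])  # positions of [clf]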
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 104 | 0 |
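A short sketch of the noise-driven random masking that forces these tests to seed NumPy/torch before comparing runs, loosely following ViTMAE's masking logic; shapes are illustrative:

import torch

batch, num_patches, mask_ratio = 2, 196, 0.75
noise = torch.rand(batch, num_patches)        # one score per patch
ids_shuffle = torch.argsort(noise, dim=1)     # low noise -> patch is kept
len_keep = int(num_patches * (1 - mask_ratio))
ids_keep = ids_shuffle[:, :len_keep]          # indices of the visible patches
print(ids_keep.shape)                         # (2, 49) for a 75% mask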
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{\"default\": {\"dataset_size\": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 353 |
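A hedged round-trip sketch matching the tests above, assuming the datasets library is installed and the working directory is writable:

import os
from datasets.info import DatasetInfo, DatasetInfosDict

os.makedirs("out_dir", exist_ok=True)
info = DatasetInfo(description="demo", dataset_size=42)
DatasetInfosDict({"default": info}).write_to_directory("out_dir")  # writes README.md
reloaded = DatasetInfosDict.from_directory("out_dir")
assert reloaded["default"].dataset_size == 42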
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0.2 , lowercase=0.2) -> Any:
'''simple docstring'''
a__ : Tuple = bp_numa
a__ : Union[str, Any] = bp_numa
a__ : Optional[int] = bp_numa
a__ : Optional[int] = conva_get[:2]
a__ : Optional[Any] = conva_get[2]
a__ : Optional[int] = size_pa
a__ : Union[str, Any] = rate_w
a__ : Dict = rate_t
a__ : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Optional[int] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a__ : Any = -2 * np.random.rand(self.conva[1]) + 1
a__ : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
a__ : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
    def save_model(self, save_path):
        # save all model parameters with pickle
        model_dic = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
        with open(save_path, 'wb') as f:
            pickle.dump(model_dic, f)
        print(f'Model saved: {save_path}')
@classmethod
    def read_model(cls, model_path):
        # restore a model from a pickled parameter dict
        with open(model_path, 'rb') as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get('conv1')
        conv_get.append(model_dic.get('step_conv1'))
        size_p1 = model_dic.get('size_pooling1')
        bp_num1 = model_dic.get('num_bp1')
        bp_num2 = model_dic.get('num_bp2')
        bp_num3 = model_dic.get('num_bp3')
        rate_w = model_dic.get('rate_weight')
        rate_t = model_dic.get('rate_thre')
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w, rate_t)
# modify model parameter
a__ : str = model_dic.get('w_conv1')
a__ : Optional[int] = model_dic.get('wkj')
a__ : Tuple = model_dic.get('vji')
a__ : str = model_dic.get('thre_conv1')
a__ : List[str] = model_dic.get('thre_bp2')
a__ : Tuple = model_dic.get('thre_bp3')
return conv_ins
    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))
    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus_list = []
        for each_focus in data_focus:
            focus_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three-dimension data to one-dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded
    def _expand_mat(self, data_mat):
        # expanding matrix to one-dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # calculate the gradient from the data slice of the pool layer
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print('----------------------Start Training-------------------------')
        print((' - - Shape: Train_Data ', np.shape(datas_train)))
        print((' - - Shape: Teach_Data ', np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 1_0000
while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
print(F'-------------Learning Time {rp}--------------')
for p in range(len(lowercase)):
# print('------------Learning Image: %d--------------'%p)
a__ : Dict = np.asmatrix(datas_train[p])
a__ : Any = np.asarray(datas_teach[p])
a__ , a__ : Optional[int] = self.convolute(
lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a__ : Dict = self.pooling(lowercase , self.size_poolinga)
a__ : Optional[Any] = np.shape(lowercase)
a__ : Union[str, Any] = self._expand(lowercase)
a__ : List[Any] = data_bp_input
a__ : Tuple = np.dot(lowercase , self.vji.T) - self.thre_bpa
a__ : Any = self.sig(lowercase)
a__ : Any = np.dot(lowercase , self.wkj.T) - self.thre_bpa
a__ : Any = self.sig(lowercase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a__ : Any = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase , (1 - bp_outa)))
a__ : Optional[Any] = np.multiply(
np.dot(lowercase , self.wkj) , np.multiply(lowercase , (1 - bp_outa)))
a__ : Tuple = np.dot(lowercase , self.vji)
a__ : Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
a__ : List[str] = pd_conva_pooled.T.getA().tolist()
a__ : str = self._calculate_gradient_from_pool(
lowercase , lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
a__ : Optional[int] = self._expand_mat(pd_conva_all[k_conv])
a__ : int = self.rate_weight * np.dot(lowercase , lowercase)
a__ : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
a__ : str = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
a__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a__ : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a__ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
a__ : Tuple = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a__ : List[str] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, '+-')
            plt.plot(yplot, 'r--')
            plt.xlabel('Learning Times')
            plt.ylabel('All_mse')
            plt.grid(True, alpha=0.5)
            plt.show()
print('------------------Training Complished---------------------')
print((' - - Training epoch: ', rp, F' - - Mse: {mse:.6f}'))
if draw_e:
draw_error()
return mse
    def predict(self, datas_test):
        # model prediction
        produce_out = []
        print('-------------------Start Testing-------------------------')
        print((' - - Shape: Test_Data ', np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_poolinga)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bpa
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bpa
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data after convolution and pooling so it can be inspected
        data_test = np.asmatrix(data)
        data_focused, data_conved = self.convolute(
            data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva,
        )
        data_pooled = self.pooling(data_conved, self.size_poolinga)
        return data_conved, data_pooled
if __name__ == "__main__":
pass
| 225 | 0 |
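The core operation the class implements with explicit slicing is a valid-mode 2-D convolution; a minimal stride-1 sketch:

import numpy as np

def conv2d_valid(x: np.ndarray, k: np.ndarray) -> np.ndarray:
    h = x.shape[0] - k.shape[0] + 1
    w = x.shape[1] - k.shape[1] + 1
    out = np.empty((h, w))
    for i in range(h):
        for j in range(w):
            # elementwise product of the window with the kernel, then sum
            out[i, j] = np.sum(x[i : i + k.shape[0], j : j + k.shape[1]] * k)
    return out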
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"
    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs, ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob
@property
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
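# Note (hedged): with the default conv_stride (5, 2, 2, 2, 2, 2, 2) the
# property above reduces to 5 * 2**6 = 320, i.e. the feature encoder emits one
# frame per 320 raw audio samples; `functools` and `operator` must be imported
# at the top of this file (not shown here) for the reduce call to resolve.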
| 164 |
'''simple docstring'''
def _A ( limit = 1000000 ):
lowercase__ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
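# Sanity check (hedged): _A(8) should return 21, since phi(2..8) =
# 1, 2, 2, 4, 2, 6, 4 and their sum counts the reduced proper fractions with
# denominator <= 8 (Project Euler 72).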
if __name__ == "__main__":
    print(_A())
| 164 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png")
SCREAMING_SNAKE_CASE_: Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
SCREAMING_SNAKE_CASE_: List[Any] = "xvjiarui/stable-diffusion-2-inpainting"
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__ , safety_checker=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = "Face of a yellow cat, high resolution, sitting on a park bench"
SCREAMING_SNAKE_CASE_: List[Any] = jax.random.PRNGKey(0)
SCREAMING_SNAKE_CASE_: Any = 50
SCREAMING_SNAKE_CASE_: Any = jax.device_count()
SCREAMING_SNAKE_CASE_: List[Any] = num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] = num_samples * [init_image]
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_samples * [mask_image]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = pipeline.prepare_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# shard inputs and rng
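        # (added note) `replicate` copies the pipeline params to every local JAX
        # device, and `shard` reshapes each input so its leading batch dimension
        # is split across devices, as the pmap-ed pipeline call below expects.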
SCREAMING_SNAKE_CASE_: Any = replicate(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.random.split(lowerCAmelCase__ , jax.device_count())
SCREAMING_SNAKE_CASE_: Tuple = shard(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = shard(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = shard(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = pipeline(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = output.images.reshape(lowerCAmelCase__ , 512 , 512 , 3)
SCREAMING_SNAKE_CASE_: int = images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Tuple = jnp.asarray(jax.device_get(image_slice.flatten()))
SCREAMING_SNAKE_CASE_: int = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084])
print(F"output_slice: {output_slice}")
assert jnp.abs(output_slice - expected_slice).max() < 1E-2
| 127 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]=7 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Optional[int]=30 , lowerCAmelCase__ : Dict=400 , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=1 / 255 , lowerCAmelCase__ : int=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
SCREAMING_SNAKE_CASE_: Optional[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: Tuple = batch_size
SCREAMING_SNAKE_CASE_: Tuple = num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] = min_resolution
SCREAMING_SNAKE_CASE_: Tuple = max_resolution
SCREAMING_SNAKE_CASE_: List[Any] = do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Any = image_mean
SCREAMING_SNAKE_CASE_: Dict = image_std
SCREAMING_SNAKE_CASE_: Tuple = do_rescale
SCREAMING_SNAKE_CASE_: int = rescale_factor
SCREAMING_SNAKE_CASE_: int = do_pad
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=False):
if not batched:
SCREAMING_SNAKE_CASE_: List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.size
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE_: List[Any] = int(self.size["shortest_edge"] * h / w)
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.size["shortest_edge"]
elif w > h:
SCREAMING_SNAKE_CASE_: Any = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = int(self.size["shortest_edge"] * w / h)
else:
SCREAMING_SNAKE_CASE_: int = self.size["shortest_edge"]
SCREAMING_SNAKE_CASE_: Dict = self.size["shortest_edge"]
else:
SCREAMING_SNAKE_CASE_: int = []
for image in image_inputs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
            SCREAMING_SNAKE_CASE_: Tuple = max(lowerCAmelCase__ , key=lambda item: item[0])[0]
            SCREAMING_SNAKE_CASE_: Optional[Any] = max(lowerCAmelCase__ , key=lambda item: item[1])[1]
return expected_height, expected_width
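# A standalone restatement (hedged) of the shortest-edge resize rule that
# `get_expected_values` mirrors above: the shorter side is scaled to
# `shortest_edge` and the other side keeps the aspect ratio. The helper name
# below is illustrative, not part of the original file.
def _expected_resize(w, h, shortest_edge=18):
    # returns (expected_height, expected_width), matching the helper above
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert _expected_resize(20, 40) == (36, 18)
assert _expected_resize(40, 20) == (18, 36)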
@require_torch
@require_vision
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Any = DeformableDetrImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = DeformableDetrImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean"))
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_rescale"))
self.assertTrue(hasattr(lowerCAmelCase__ , "do_pad"))
self.assertTrue(hasattr(lowerCAmelCase__ , "size"))
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__)
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84})
self.assertEqual(image_processor.do_pad , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : str):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_: str = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Any = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# Initialize image_processing
SCREAMING_SNAKE_CASE_: List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_: Dict = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt").pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image and target
SCREAMING_SNAKE_CASE_: Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: str = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[int] = {"image_id": 3_9769, "annotations": target}
# encode them
SCREAMING_SNAKE_CASE_: str = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE_: Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: int = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: str = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: str = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: int = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: Tuple = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify orig_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: str = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_: Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r") as f:
SCREAMING_SNAKE_CASE_: List[Any] = json.loads(f.read())
SCREAMING_SNAKE_CASE_: Optional[Any] = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
SCREAMING_SNAKE_CASE_: int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
# encode them
SCREAMING_SNAKE_CASE_: Any = DeformableDetrImageProcessor(format="coco_panoptic")
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt")
# verify pixel values
SCREAMING_SNAKE_CASE_: Dict = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4))
# verify area
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__))
# verify boxes
SCREAMING_SNAKE_CASE_: List[str] = torch.Size([6, 4])
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3))
# verify image_id
SCREAMING_SNAKE_CASE_: Any = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__))
# verify is_crowd
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__))
# verify class_labels
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__))
# verify masks
SCREAMING_SNAKE_CASE_: Tuple = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__)
# verify orig_size
SCREAMING_SNAKE_CASE_: str = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__))
# verify size
SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__))
| 127 | 1 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
RESOURCE_FILES_NAMES = {
    '''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
    '''vocab_file''': '''vocab.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
    },
    '''sentencepiece_model_file''': {
        '''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
        '''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ernie-m-base''': 5_14,
    '''ernie-m-large''': 5_14,
}
PRETRAINED_INIT_CONFIGURATION = {
    '''ernie-m-base''': {'''do_lower_case''': False},
    '''ernie-m-large''': {'''do_lower_case''': False},
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
'''simple docstring'''
_lowerCamelCase: List[str] = ["input_ids"]
_lowerCamelCase: Any = VOCAB_FILES_NAMES
_lowerCamelCase: Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = RESOURCE_FILES_NAMES
def __init__( self : List[Any] ,A_ : int ,A_ : Tuple=None ,A_ : List[str]=False ,A_ : Union[str, Any]="utf8" ,A_ : List[Any]="[UNK]" ,A_ : Optional[int]="[SEP]" ,A_ : str="[PAD]" ,A_ : int="[CLS]" ,A_ : str="[MASK]" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : str ,) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space
        # before it and is included in the raw text, so that it matches in a
        # non-normalized sentence.
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,vocab_file=A_ ,encoding=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = do_lower_case
A = sentencepiece_model_ckpt
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
A = self.load_vocab(filepath=A_ )
else:
A = {self.sp_model.id_to_piece(A_ ): id for id in range(self.sp_model.get_piece_size() )}
A = {v: k for k, v in self.vocab.items()}
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[str] ) -> Any:
if text is None:
return None
A = self.tokenize(A_ )
A , A = '', []
for i, ch in enumerate(A_ ):
if ch in self.SP_CHAR_MAPPING:
A = self.SP_CHAR_MAPPING.get(A_ )
else:
A = unicodedata.normalize('NFKC' ,A_ )
if self.is_whitespace(A_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(A_ ) )
A , A , A = normalized_text, [], 0
if self.do_lower_case:
A = text.lower()
for token in split_tokens:
if token[:1] == "▁":
A = token[1:]
A = text[offset:].index(A_ ) + offset
A = start + len(A_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
A = end
return token_mapping
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
return len(self.vocab )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return dict(self.vocab ,**self.added_tokens_encoder )
def __getstate__( self : Optional[int] ) -> Optional[int]:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Any ,A_ : Any ) -> Optional[int]:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ) -> int:
return "".join((self.SP_CHAR_MAPPING.get(A_ ,A_ ) for c in text) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Any=64 ,A_ : int=0.1 ) -> str:
if self.sp_model_kwargs.get('enable_sampling' ) is True:
A = True
if self.sp_model_kwargs.get('alpha' ) is not None:
A = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
A = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
A = self.sp_model.EncodeAsPieces(A_ )
else:
A = self.sp_model.SampleEncodeAsPieces(A_ ,A_ ,A_ )
A = []
for pi, piece in enumerate(A_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(A_ ) and pi != 0:
new_pieces.append(A_ )
continue
else:
continue
A = 0
for i, chunk in enumerate(A_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(A_ ) or self.is_punct(A_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(A_ )
A = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
A = i
if len(A_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ) -> Tuple:
        A = ''.join(A_ ).replace(SPIECE_UNDERLINE ,' ' ).strip()
        return A
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ) -> List[Any]:
        A = self.convert_ids_to_tokens(A_ )
        A = ''.join(A ).replace(SPIECE_UNDERLINE ,' ' ).strip()
        return A
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ) -> List[Any]:
return self.vocab.get(A_ ,self.vocab.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : int ) -> Optional[Any]:
return self.reverse_vocab.get(A_ ,self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : int ,A_ : int=None ) -> List[str]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : Dict=None ) -> List[Any]:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : List[str]=None ,A_ : Tuple=False ) -> Union[str, Any]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(A_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(A_ ) + 1) + [1] * (len(A_ ) + 3)
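    # Worked example (hedged) for the two methods above with a first sequence
    # of length 2 and a second of length 3:
    #   ids  : [CLS] a1 a2 [SEP] [SEP] b1 b2 b3 [SEP]
    #   types:   0   0  0    1     1   1  1  1    1    -> [0] * 3 + [1] * 6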
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[int] ) -> Optional[int]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ) -> Dict:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[Any] ) -> List[Any]:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ) -> Optional[int]:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(A_ ) == 1:
A = unicodedata.category(A_ )
if cat == "Zs":
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> Any:
A = {}
with io.open(A_ ,'r' ,encoding='utf-8' ) as f:
for index, line in enumerate(A_ ):
A = line.rstrip('\n' )
A = int(A_ )
return token_to_idx
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
A = 0
if os.path.isdir(A_ ):
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
A = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(A_ ,'w' ,encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
A = token_index
writer.write(token + '\n' )
index += 1
A = os.path.join(A_ ,'sentencepiece.bpe.model' )
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
        return (vocab_file,)
| 74 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
'''simple docstring'''
super().__init__()
_snake_case = nn.ModuleList(lowercase )
def A ( self : Optional[int] , lowercase : torch.FloatTensor , lowercase : Union[torch.Tensor, float, int] , lowercase : torch.Tensor , lowercase : List[torch.tensor] , lowercase : List[float] , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[torch.Tensor] = None , lowercase : Optional[Dict[str, Any]] = None , lowercase : bool = False , lowercase : bool = True , ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(lowercase , lowercase , self.nets ) ):
_snake_case , _snake_case = controlnet(
lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , )
# merge samples
if i == 0:
_snake_case , _snake_case = down_samples, mid_sample
else:
_snake_case = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase , lowercase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A ( self : Dict , lowercase : Union[str, os.PathLike] , lowercase : bool = True , lowercase : Callable = None , lowercase : bool = False , lowercase : Optional[str] = None , ):
'''simple docstring'''
_snake_case = 0
_snake_case = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase , is_main_process=lowercase , save_function=lowercase , safe_serialization=lowercase , variant=lowercase , )
idx += 1
_snake_case = model_path_to_save + f'''_{idx}'''
@classmethod
def A ( cls : Any , lowercase : Optional[Union[str, os.PathLike]] , **lowercase : List[str] ):
'''simple docstring'''
_snake_case = 0
_snake_case = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case = pretrained_model_path
while os.path.isdir(lowercase ):
_snake_case = ControlNetModel.from_pretrained(lowercase , **lowercase )
controlnets.append(lowercase )
idx += 1
_snake_case = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowercase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowercase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowercase )}. Expected at least {pretrained_model_path + '_0'}.''' )
        return cls(lowercase )
| 282 | 0 |
from copy import deepcopy
class a_ :
'''simple docstring'''
def __init__( self , lowercase_ = None , lowercase_ = None ) -> None:
'''simple docstring'''
if arr is None and size is not None:
lowerCAmelCase_ = size
lowerCAmelCase_ = [0] * size
elif arr is not None:
self.init(lowercase_ )
else:
raise ValueError('Either arr or size must be specified' )
def _lowercase ( self , lowercase_ ) -> None:
'''simple docstring'''
lowerCAmelCase_ = len(lowercase_ )
lowerCAmelCase_ = deepcopy(lowercase_ )
for i in range(1 , self.size ):
lowerCAmelCase_ = self.next_(lowercase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def _lowercase ( self ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCAmelCase_ = self.next_(lowercase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def _lowercase ( lowercase_ ) -> int:
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def _lowercase ( lowercase_ ) -> int:
'''simple docstring'''
return index - (index & (-index))
def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCAmelCase_ = self.next_(lowercase_ )
def _lowercase ( self , lowercase_ , lowercase_ ) -> None:
'''simple docstring'''
self.add(lowercase_ , value - self.get(lowercase_ ) )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
if right == 0:
return 0
lowerCAmelCase_ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCAmelCase_ = self.prev(lowercase_ )
return result
def _lowercase ( self , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
return self.prefix(lowercase_ ) - self.prefix(lowercase_ )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
return self.query(lowercase_ , index + 1 )
def _lowercase ( self , lowercase_ ) -> int:
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
lowerCAmelCase_ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCAmelCase_ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
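# Standalone sketch (hedged) of the same Fenwick update/prefix logic as the
# class above, written one-indexed so it runs independently of the obfuscated
# method names; `fenwick_add`/`fenwick_prefix` are illustrative names.
def fenwick_add(tree, i, delta):
    # point update: add delta at position i
    while i < len(tree):
        tree[i] += delta
        i += i & -i


def fenwick_prefix(tree, i):
    # sum of positions 1..i
    total = 0
    while i > 0:
        total += tree[i]
        i -= i & -i
    return total


_tree = [0] * 6  # supports positions 1..5
for _i, _v in enumerate([1, 2, 3, 4, 5], start=1):
    fenwick_add(_tree, _i, _v)
assert fenwick_prefix(_tree, 3) == 6
assert fenwick_prefix(_tree, 5) - fenwick_prefix(_tree, 2) == 12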
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class a_ ( a_ ):
'''simple docstring'''
__a: str = ['''vqvae''']
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ , mel=lowercase_ , vqvae=lowercase_ )
def _lowercase ( self ) -> int:
'''simple docstring'''
return 5_0 if isinstance(self.scheduler , lowercase_ ) else 1_0_0_0
@torch.no_grad()
def __call__( self , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = 0 , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
lowerCAmelCase_ = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase_ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase_ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowercase_ , device=self.device , )
lowerCAmelCase_ = noise
lowerCAmelCase_ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowercase_ , lowercase_ )
lowerCAmelCase_ = self.mel.audio_slice_to_image(lowercase_ )
lowerCAmelCase_ = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase_ = (input_image / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase_ = self.vqvae.encode(torch.unsqueeze(lowercase_ , 0 ) ).latent_dist.sample(
generator=lowercase_ )[0]
lowerCAmelCase_ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase_ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase_ = int(mask_start_secs * pixels_per_second )
lowerCAmelCase_ = int(mask_end_secs * pixels_per_second )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowercase_ ):
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ , lowercase_ )['sample']
else:
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
if isinstance(self.scheduler , lowercase_ ):
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , eta=lowercase_ , generator=lowercase_ , )['prev_sample']
else:
lowerCAmelCase_ = self.scheduler.step(
model_output=lowercase_ , timestep=lowercase_ , sample=lowercase_ , generator=lowercase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
lowerCAmelCase_ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase_ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
lowerCAmelCase_ = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase_ = self.vqvae.decode(lowercase_ )['sample']
lowerCAmelCase_ = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase_ = (images * 2_5_5).round().astype('uint8' )
lowerCAmelCase_ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
lowerCAmelCase_ = [self.mel.image_to_audio(lowercase_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
def _lowercase ( self , lowercase_ , lowercase_ = 5_0 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , lowercase_ )
self.scheduler.set_timesteps(lowercase_ )
lowerCAmelCase_ = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase_ = (sample / 2_5_5) * 2 - 1
lowerCAmelCase_ = torch.Tensor(lowercase_ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase_ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase_ = self.scheduler.alphas_cumprod[t]
lowerCAmelCase_ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase_ = 1 - alpha_prod_t
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ )['sample']
lowerCAmelCase_ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase_ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase_ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _lowercase ( lowercase_ , lowercase_ , lowercase_ ) -> torch.Tensor:
'''simple docstring'''
lowerCAmelCase_ = acos(torch.dot(torch.flatten(lowercase_ ) , torch.flatten(lowercase_ ) ) / torch.norm(lowercase_ ) / torch.norm(lowercase_ ) )
return sin((1 - alpha) * theta ) * xa / sin(lowercase_ ) + sin(alpha * theta ) * xa / sin(lowercase_ )
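# Quick check (hedged) of the spherical interpolation above, restated with
# explicit names: at alpha=0 it returns the first tensor and at alpha=1 the
# second. `_slerp` is an illustrative name for the static method.
def _slerp(xa, xb, alpha):
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)


_xa, _xb = torch.randn(4), torch.randn(4)
assert torch.allclose(_slerp(_xa, _xb, 0.0), _xa, atol=1e-6)
assert torch.allclose(_slerp(_xa, _xb, 1.0), _xb, atol=1e-6)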
| 14 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase ( A_ )-> int:
'''simple docstring'''
a : Any = filter(lambda A_ : p.requires_grad , model.parameters() )
a : Optional[int] = sum([np.prod(p.size() ) for p in model_parameters] )
return params
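# Standalone restatement (hedged) of the trainable-parameter count above,
# assuming only parameters with requires_grad=True are meant to be counted;
# `_count_trainable` is an illustrative name.
import torch.nn as nn


def _count_trainable(model):
    return int(sum(np.prod(p.size()) for p in model.parameters() if p.requires_grad))


assert _count_trainable(nn.Linear(3, 4)) == 3 * 4 + 4  # 12 weights + 4 biases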
__lowercase = logging.getLogger(__name__)
def lowercase ( A_ , A_ )-> Dict:
'''simple docstring'''
if metric == "rouge2":
a : Union[str, Any] = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
a : Any = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
a : Optional[int] = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
a : Optional[Any] = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
            F'''seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can add a new metric'''
            " by extending this function." )
a : Union[str, Any] = ModelCheckpoint(
dirpath=A_ , filename=A_ , monitor=F'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase ( A_ , A_ )-> List[str]:
'''simple docstring'''
return EarlyStopping(
monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=A_ , verbose=A_ , )
class _A ( pl.Callback ):
"""simple docstring"""
def __snake_case ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : int):
a : Optional[Any] = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(__UpperCAmelCase)
@rank_zero_only
def __snake_case ( self : Tuple , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule , __UpperCAmelCase : str , __UpperCAmelCase : Any=True):
logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
a : Dict = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
a : Optional[Any] = Path(pl_module.hparams.output_dir)
if type_path == "test":
a : str = od / "test_results.txt"
a : str = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
a : str = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
a : List[Any] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=__UpperCAmelCase)
generations_file.parent.mkdir(exist_ok=__UpperCAmelCase)
with open(__UpperCAmelCase , "a+") as writer:
for key in sorted(__UpperCAmelCase):
if key in ["log", "progress_bar", "preds"]:
continue
a : List[str] = metrics[key]
if isinstance(__UpperCAmelCase , torch.Tensor):
a : str = val.item()
a : Dict = f'''{key}: {val:.6f}\n'''
writer.write(__UpperCAmelCase)
if not save_generations:
return
if "preds" in metrics:
a : Any = "\n".join(metrics["preds"])
generations_file.open("w+").write(__UpperCAmelCase)
@rank_zero_only
def __snake_case ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict):
try:
a : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
a : Any = pl_module.model.num_parameters()
a : Dict = count_trainable_parameters(__UpperCAmelCase)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
def __snake_case ( self : str , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule):
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(__UpperCAmelCase , __UpperCAmelCase , "test")
@rank_zero_only
def __snake_case ( self : Dict , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : Union[str, Any]):
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 40 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( monkeypatch )-> List[Any]:
'''simple docstring'''
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase ( monkeypatch )-> Tuple:
'''simple docstring'''
    class MetricMock :
        """simple docstring"""
        def __init__( self : str , metric_id : int):
            self.metric_id = metric_id
    class HfhMock :
        """simple docstring"""
        _metrics = [MetricMock(metric_id) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
        def __snake_case ( self : List[str]):
            return self._metrics
    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any:
'''simple docstring'''
if "tmp_path" in args:
a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
func(*A_ )
| 40 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCAmelCase__ ( datasets.BuilderConfig ):
lowerCAmelCase_ = None
class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ):
lowerCAmelCase_ = PandasConfig
def _snake_case ( self ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
lowercase_ : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ):
lowercase_ : Optional[Any] = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase_ : Any = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase_ : Dict = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowercase_ : List[str] = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files]
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) )
return splits
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase_ : str = table_cast(__SCREAMING_SNAKE_CASE , self.config.features.arrow_schema )
return pa_table
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for i, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ):
with open(__SCREAMING_SNAKE_CASE , '''rb''' ) as f:
lowercase_ : Dict = pa.Table.from_pandas(pd.read_pickle(__SCREAMING_SNAKE_CASE ) )
yield i, self._cast_table(__SCREAMING_SNAKE_CASE )
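# Minimal sketch (hedged) of the pandas -> Arrow conversion performed in
# `_generate_tables` above, without the pickle round-trip; pandas and pyarrow
# are already imported at the top of this file.
_df = pd.DataFrame({'''a''': [1, 2], '''b''': ['''x''', '''y''']})
_table = pa.Table.from_pandas(_df)
assert _table.num_rows == 2 and _table.column_names == ['''a''', '''b''']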
| 264 |
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def snake_case_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
if version.parse(hfh.__version__ ).release < version.parse('''0.11.0''' ).release:
# old versions of hfh don't url-encode the file path
lowercase_ : int = quote(__SCREAMING_SNAKE_CASE )
return hfh.hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='''dataset''' , revision=__SCREAMING_SNAKE_CASE )
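# Illustration (hedged) of why the path is quoted manually for old hub
# versions: `quote` percent-encodes characters that are unsafe in a URL path.
assert quote('''data/train file.txt''') == '''data/train%20file.txt'''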
| 264 | 1 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = """ylacombe/bark-small"""
snake_case : Union[str, Any] = tempfile.mkdtemp()
snake_case : Dict = """en_speaker_1"""
snake_case : Any = """This is a test string"""
snake_case : str = """speaker_embeddings_path.json"""
snake_case : Union[str, Any] = """speaker_embeddings"""
def lowerCamelCase_ ( self , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[Any] = BarkProcessor(tokenizer=UpperCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
snake_case : Any = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case : str = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case : Dict = 35
snake_case : Any = 2
snake_case : str = 8
snake_case : Tuple = {
"""semantic_prompt""": np.ones(UpperCAmelCase__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case : Optional[int] = processor(text=self.input_string , voice_preset=UpperCAmelCase__ )
snake_case : Any = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case : str = os.path.join(self.tmpdirname , "file.npz" )
np.savez(UpperCAmelCase__ , **UpperCAmelCase__ )
snake_case : str = processor(text=self.input_string , voice_preset=UpperCAmelCase__ )
snake_case : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCAmelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case : Optional[Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : Union[str, Any] = BarkProcessor(tokenizer=UpperCAmelCase__ )
snake_case : Any = processor(text=self.input_string )
snake_case : List[str] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 148 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class UpperCamelCase ( nn.Module ):
def __init__( self : Union[str, Any] ) -> int:
super().__init__()
_a : Optional[Any] = nn.Linear(3 , 4 )
_a : Tuple = nn.BatchNormad(4 )
_a : Dict = nn.Linear(4 , 5 )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int:
return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) )
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return (args[0] + 1,) + args[1:], kwargs
class UpperCamelCase ( snake_case_ ):
def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
return output + 1
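# (added note, hedged) With add_hook_to_module, the pre-forward hook above
# rewrites (args, kwargs) before the wrapped forward runs, and the
# post-forward hook rewrites the forward's output, so a model carrying
# PreForwardHook computes model(x + 1) and one carrying PostForwardHook
# returns model(x) + 1, which is exactly what the tests below assert.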
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Dict ) -> str:
_a : List[Any] = ModelForTest()
_a : str = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
self.assertEqual(test_model._hf_hook , UpperCAmelCase__ )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Optional[int] ) -> Optional[int]:
_a : Dict = ModelForTest()
_a : Dict = ModelHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ )
self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(UpperCAmelCase__ )
self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) )
self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) )
def _lowercase ( self : Dict ) -> int:
_a : str = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Optional[Any] = test_model(x + 1 )
_a : str = test_model(x + 2 )
_a : Union[str, Any] = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
_a : int = PreForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : str = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : int = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Tuple = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 )
def _lowercase ( self : Tuple ) -> int:
_a : Tuple = ModelForTest()
_a : Union[str, Any] = torch.randn(2 , 3 )
_a : Optional[int] = test_model(UpperCAmelCase__ )
_a : int = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
_a : List[Any] = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Dict = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
_a : Any = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : Optional[int] = test_model(UpperCAmelCase__ )
assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 )
def _lowercase ( self : Dict ) -> Optional[Any]:
_a : Any = ModelForTest()
_a : List[Any] = torch.randn(2 , 3 )
_a : Dict = test_model(UpperCAmelCase__ )
_a : Any = PostForwardHook()
add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ )
_a : List[str] = test_model(UpperCAmelCase__ )
self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) )
self.assertTrue(outputa.requires_grad )
_a : Any = True
_a : Union[str, Any] = test_model(UpperCAmelCase__ )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def _lowercase ( self : Optional[Any] ) -> str:
_a : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
_a : Optional[int] = torch.randn(2 , 3 )
_a : Any = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) )
_a : str = torch.randn(2 , 3 ).to(0 )
_a : Union[str, Any] = model(UpperCAmelCase__ )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
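# --- Illustrative sketch (not part of the test suite above): a custom hook
# subclasses the `ModelHook` protocol from `accelerate.hooks` and overrides
# `post_forward`; `add_hook_to_module` then wraps the module's forward so the
# hook runs on every call. The hook class and factor below are hypothetical.
from accelerate.hooks import ModelHook


class ScaleOutputHook(ModelHook):
    """Multiplies every forward output by a constant factor."""

    def __init__(self, factor=2.0):
        self.factor = factor

    def post_forward(self, module, output):
        # `output` is whatever the wrapped forward returned.
        return output * self.factor


if __name__ == "__main__":
    layer = torch.nn.Linear(3, 3)
    x = torch.randn(2, 3)
    baseline = layer(x)
    add_hook_to_module(layer, ScaleOutputHook(2.0))
    assert torch.allclose(layer(x), baseline * 2, atol=1e-5)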
| 294 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
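# --- Usage sketch (hypothetical values): `to_dict` above exists so that a
# `GenerationConfig` stored on the arguments serializes to JSON cleanly.
if __name__ == "__main__":
    import json

    args = Seq2SeqTrainingArguments(
        output_dir="out",  # hypothetical path
        predict_with_generate=True,
        generation_config=GenerationConfig(max_length=128, num_beams=4),
    )
    as_dict = args.to_dict()
    assert isinstance(as_dict["generation_config"], dict)
    json.dumps(as_dict["generation_config"])  # now JSON-serializable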
| 296 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
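# --- Usage sketch: the helpers above read typed values out of the process
# environment; the env var names below are illustrative.
if __name__ == "__main__":
    os.environ["WORLD_SIZE"] = "4"
    os.environ["USE_FP16"] = "yes"
    assert get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1) == 4
    assert parse_flag_from_env("USE_FP16", default=False) is True
    assert parse_choice_from_env("MIXED_PRECISION", default="no") == "no"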
| 296 | 1 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Checks primality by trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 1_0**6) -> int:
    """Counts primes below `max_prime` that are differences of consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1)**3 - 1**3 = 3*1*1 + 3*1 + 1
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # advance to 3*n*n + 3*n + 1 for n = cube_index
    return primes_count
if __name__ == "__main__":
print(F'{solution() = }')
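# --- Sanity-check sketch: the candidates above are exactly
# (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, the differences of consecutive cubes,
# which is why the loop only has to test those numbers.
def first_candidates(count):
    return [3 * n * n + 3 * n + 1 for n in range(1, count + 1)]


if __name__ == "__main__":
    assert first_candidates(5) == [7, 19, 37, 61, 91]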
| 253 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechTaHifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechTaHifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechTaHifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
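# --- Usage sketch (not a test): minimal text-to-audio generation with the
# pipeline exercised above; the step count and clip length are illustrative.
def generate_sample_audio(prompt="A hammer hitting a wooden surface"):
    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    pipe = pipe.to(torch_device)
    return pipe(prompt, num_inference_steps=10, audio_length_in_s=5.12).audios[0]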
| 253 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
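# --- Usage sketch (assumes the public `facebook/regnet-y-040` checkpoint and
# the `AutoImageProcessor` API from transformers; not part of the modeling
# file itself).
def classify_image(image):
    from transformers import AutoImageProcessor

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = processor(image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_class]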
| 364 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
'''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
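# --- Migration sketch: both spellings construct the same processor; the
# subclass above only exists to emit the deprecation warning.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        SegformerFeatureExtractor()  # deprecated path warns
    assert any("deprecated" in str(w.message) for w in caught)
    SegformerImageProcessor()  # preferred replacement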
| 334 | 0 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f'Half Adder Output Qubit Counts: {counts}')
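# --- Truth-table sketch: the most frequent bitstring in the counts is
# "<carry><sum>" (classical bit 1, then bit 0) for each input pair.
if __name__ == "__main__":
    expected = {(0, 0): "00", (0, 1): "01", (1, 0): "01", (1, 1): "10"}
    for (a, b), bits in expected.items():
        result = half_adder(a, b)
        assert max(result, key=result.get) == bits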
| 319 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
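# --- Sanity-check sketch: the two "reverse" helpers undo the patch-merging
# permutation applied by their "correct" counterparts (the index permutation
# [0, 2, 1, 3] is its own inverse), so a round trip must be the identity.
def _check_unfold_round_trip():
    weight = torch.randn(8, 16)
    assert torch.equal(
        reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(weight)), weight
    )
    norm = torch.randn(16)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(norm)), norm)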
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
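# Example invocation (the script filename is illustrative; arguments as defined above):
#   python convert_upernet_swin_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny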
| 278 | 0 |
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.float16, text_encoder=None, tokenizer=None)
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to('cuda')
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt('anime turtle', device='cuda')
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device='cpu').manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type='np', )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy')
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device='cpu').manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type='np', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy')
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement() -> None:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
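# The helper above resets CUDA peak-memory statistics so each pipeline run in the tests
# measures its own peak. A minimal sketch of the intended pairing (names as used above):
#   _start_torch_memory_measurement()
#   ...  # run one pipeline
#   mem_bytes = torch.cuda.max_memory_allocated()  # peak bytes since the reset above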
| 363 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'cls_token': '<s>'}
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
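    # In the toy vocabulary above, '\u0120' (rendered 'Ġ') is the byte-level BPE marker for
    # a leading space, so space-prefixed pieces are distinct tokens from word-initial ones.
    # A small sketch of the expected behaviour given the merges above:
    #   self.get_tokenizer().tokenize(' lower')  # -> ['\u0120low', 'er']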
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=True), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('roberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = 'Encode this sequence.'
        space_encoding = tokenizer.byte_encoder[' '.encode('utf-8')[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({'bos_token': '<s>'})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = 'Encode <mask> sequence'
        sequence_nospace = 'Encode <mask>sequence'
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
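    # The effect asserted above: AddedToken(lstrip=True) lets '<mask>' absorb the space on
    # its left, so the token following the mask keeps its own leading-space marker only
    # when the input text actually contains a space after '<mask>'. (Sketch, not run here.)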
    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = 'A, <mask> AllenNLP sentence.'
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids']), sum(tokens_p['token_type_ids']))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']), sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
                self.assertSequenceEqual(
                    tokens_r_str, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['add_prefix_space'], add_prefix_space)
            self.assertEqual(post_processor_state['trim_offsets'], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Check that the offsets adapt to the `add_prefix_space` and `trim_offsets` arguments
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
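    # Summary of the offsets exercised above for the second "hello" in "hello hello" (len 5):
    #   trim_offsets=True  -> (6, 11): the leading space is trimmed from the offsets
    #   trim_offsets=False -> (5, 11): the leading space is kept in the offsets
    # add_prefix_space only shifts offsets when the text itself starts with a space.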
| 260 | 0 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its string representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
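# A few quick sanity checks for the function above (digits past 9 come from ALPHABET_VALUES):
#   decimal_to_any(5, 4)    -> '11'
#   decimal_to_any(255, 16) -> 'FF'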
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
        for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 50 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default='audio-classification', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        # the dataclass is frozen, so set the attribute through __dict__
        task_template.__dict__['label_schema'] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
 | 212 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", bos_token="<s>", eos_token="</s>", clean_text=True, tokenize_chinese_chars=True, strip_accents=None, wordpieces_prefix="##", **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, bos_token=bos_token, eos_token=eos_token, clean_text=clean_text, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, wordpieces_prefix=wordpieces_prefix, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
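    # Funnel marks the [CLS] position with token type id 2 (cls_token_type_id above) instead
    # of BERT's all-zero first segment. A minimal sketch of the method just defined:
    #   tok = FunnelTokenizerFast.from_pretrained('funnel-transformer/small')
    #   tok.create_token_type_ids_from_sequences([10, 11], [12])  # -> [2, 0, 0, 0, 1, 1]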
| 359 |
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with the Maclaurin series, after reducing theta modulo 2*pi."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with the Maclaurin series, after reducing theta modulo 2*pi."""
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
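# Quick numerical check against the standard library (default accuracy of 30 terms is more
# than enough once theta is reduced into [0, 2*pi)):
#   from math import cos, sin
#   assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
#   assert abs(maclaurin_cos(-5) - cos(-5)) < 1e-9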
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 80 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
lowercase__ :int = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
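# With the public API re-exported above, typical usage looks like (dataset id illustrative):
#   from datasets import load_dataset
#   ds = load_dataset('imdb', split='train')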
| 101 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self):
        return str(self.root)
    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)
    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None):
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
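# A small usage sketch for the traversal helpers above (names as defined in this class):
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10)
#   t.kth_smallest(2, t.root)  # -> 3, the 2nd value of the inorder (sorted) traversal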
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn't exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn't exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
 | 112 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
 | 365 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
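# Quick examples for the collinearity check above:
#   are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  # -> True  (all on the x = y = z line)
#   are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))  # -> False (cross product is non-zero)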
| 274 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1024,
'facebook/nllb-200-distilled-600M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # the mask token absorbs the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
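    # A minimal translation-tokenization sketch (real NLLB checkpoint id; language codes
    # come from FAIRSEQ_LANGUAGE_CODES above):
    #   tok = NllbTokenizerFast.from_pretrained('facebook/nllb-200-distilled-600M',
    #                                           src_lang='eng_Latn', tgt_lang='fra_Latn')
    #   batch = tok('Hello world', text_target='Bonjour le monde', return_tensors='pt')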
| 15 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
| 59 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # np.maximum broadcasts the scalar 0 against the input, clamping negatives.
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 369 |
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first ``n`` lines of every file in ``src_dir`` to ``dest_dir``."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
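# Example invocation via fire (hypothetical file name and paths): keep only the
# first 100 lines of every file in ./data, writing the copies to ./data_minified:
#   python minify.py ./data ./data_minified 100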
| 139 | 0 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Patched module object that mirrors the original module's attributes."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, e.g. ``os.path.join`` inside a module."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__() | 225 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for models with a teacher head, whose call() takes no labels
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 225 | 1 |
from __future__ import annotations

# Marker that terminates a complete word inside the trie.
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)
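# With the words inserted above, a prefix query returns every stored word
# completed with the trailing space the END marker maps to, e.g.
#   autocomplete_using_trie("de")
#   # -> ('depart ', 'detergent ', 'deer ', 'deal ')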
def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 220 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring shared by ``text1`` and ``text2``."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common substring ending at text1[i-1] / text2[j-1]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
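    # Illustrative call with made-up inputs: "abcdef" and "xabded" share "ab"
    # as their longest common substring.
    print(longest_common_substring("abcdef", "xabded"))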
| 220 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 127 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
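# Minimal usage sketch (names as restored above): build a config with a
# narrower width multiplier and read the value back.
#   config = MobileNetV2Config(depth_multiplier=0.75)
#   assert config.depth_multiplier == 0.75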
| 127 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2 | 312 | """simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 312 | 1 |
from copy import deepcopy


class FenwickTree:
    """Fenwick (binary indexed) tree supporting point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
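    # Usage sketch (class name as restored above): point updates and range
    # sums over the array [1, 2, 3, 4, 5].
    fenwick = FenwickTree([1, 2, 3, 4, 5])
    print(fenwick.prefix(3))  # 1 + 2 + 3 = 6
    print(fenwick.query(1, 4))  # 2 + 3 + 4 = 9
    fenwick.add(0, 10)  # the underlying array becomes [11, 2, 3, 4, 5]
    print(fenwick.prefix(1))  # 11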
| 14 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan number sequence C(0) .. C(upper_limit)."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
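# Sanity check against the known start of the sequence: 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]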
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
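# For example, compute_em(predictions=["The cat sat."], references=[["the cat sat"]])
# returns 100.0: normalize_answer lower-cases the text and strips punctuation
# and articles, so both sides reduce to "cat sat".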
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter

    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=[
'https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py',
'https://github.com/cocoxu/simplification/blob/master/SARI.py',
'https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py',
'https://github.com/mjpost/sacreBLEU',
] , reference_urls=[
'https://www.aclweb.org/anthology/Q16-1029.pdf',
'https://github.com/mjpost/sacreBLEU',
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def UpperCAmelCase__ ( self :str , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :str ) -> Optional[int]:
UpperCAmelCase = {}
result.update({'sari': compute_sari(sources=__snake_case , predictions=__snake_case , references=__snake_case )} )
result.update({'sacrebleu': compute_sacrebleu(predictions=__snake_case , references=__snake_case )} )
result.update({'exact': compute_em(predictions=__snake_case , references=__snake_case )} )
return result
| 358 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
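# Note on the key relationship: e_2 is the modular inverse of e_1**d (mod p),
# so (pow(e_1, d, p) * e_2) % p == 1; ElGamal decryption relies on this identity.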
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 181 | 0 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 264 |
"""simple docstring"""
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 264 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
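# Usage sketch: for a Keras symbolic tensor whose batch and sequence dims are
# unknown, shape_list mixes static Python ints with dynamic scalar tensors:
#   x = tf.keras.Input(shape=(None, 64))
#   shape_list(x)  # -> [<dynamic>, <dynamic>, 64]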
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny constant keeps the op numerically well-defined on fully
    # masked rows while leaving the result effectively unchanged.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
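
# Illustrative usage (added, not part of the original module): flattening dims
# 1..2 of a [2, 3, 4] tensor gives [2, 12], mirroring torch.flatten.
_t = tf.zeros([2, 3, 4])
assert shape_list(flatten(_t, start_dim=1)) == [2, 12]
assert shape_list(flatten(_t)) == [24]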
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    # HDF5 object headers are limited to 64512 bytes, so larger attribute lists
    # must be split into chunks before saving.
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
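
# Illustrative round trip (added, not part of the original module). It assumes
# the optional `h5py` dependency is installed; `h5py.File` accepts an in-memory
# file-like object here.
import io

import h5py

with h5py.File(io.BytesIO(), "w") as _f:
    save_attributes_to_hdf5_group(_f, "layer_names", [b"dense_1", b"dense_2"])
    assert load_attributes_from_hdf5_group(_f, "layer_names") == ["dense_1", "dense_2"]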
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 350 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self) -> None:
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 76 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4_0_9_6,
"""google/bigbird-roberta-large""": 4_0_9_6,
"""google/bigbird-base-trivia-itc""": 4_0_9_6,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BigBird tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 296 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strips the remote filesystem protocol (e.g. "s3://") from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Checks if `fs` is a remote filesystem, i.e. anything other than the local one."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clears the loop and thread references fsspec can leave dangling after a fork."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
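
# Illustrative usage (added, not part of the original module):
assert extract_path_from_uri("s3://my-bucket/datasets/squad") == "my-bucket/datasets/squad"
assert extract_path_from_uri("relative/path") == "relative/path"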
| 296 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update
    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1
    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )
    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Set the range [a, b] to val in O(log n) using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True
    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Query the maximum of the range [a, b] in O(log n) using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
lowerCamelCase_ = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
lowerCamelCase_ = 1_5
lowerCamelCase_ = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 1_1))
print(segt.query(1, 1, size, 7, 1_2))
segt.update(1, 1, size, 1, 3, 1_1_1)
print(segt.query(1, 1, size, 1, 1_5))
segt.update(1, 1, size, 7, 8, 2_3_5)
print(segt) | 239 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """Construct a BertGeneration tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 239 | 1 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
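    # Sanity check (added): a 90 degree arc of a radius-10 circle is a quarter
    # circumference, 2 * pi * 10 / 4 = 5 * pi ~= 15.708.
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9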
| 58 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 334 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
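
# Illustrative usage (added, not part of the original script): chunks of three
# words on the default separator.
assert split_text("one two three four five", n=3) == ["one two three", "four five"]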
def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
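
    # Illustrative follow-up (added, not part of the original script): with the
    # index built, a question could be embedded and matched against the
    # passages. The question-encoder checkpoint below mirrors the script's
    # context-encoder default and is an assumption; left commented out because
    # it downloads model weights.
    # from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
    # q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    # q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
    # question_emb = q_encoder(**q_tokenizer(rag_example_args.question or "test", return_tensors="pt"))[0][0].numpy()
    # scores, retrieved_examples = dataset.get_nearest_examples("embeddings", question_emb, k=5)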
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__snake_case =HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__snake_case , __snake_case , __snake_case =parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__snake_case =rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 55 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 55 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
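
# Illustrative usage (added, not part of the original module): a 1 kHz low-pass
# biquad at a 48 kHz sample rate. The `a_coeffs`/`b_coeffs` attribute names
# follow the repo's IIRFilter and are assumptions here.
# >>> filt = make_lowpass(1000, 48000)
# >>> len(filt.a_coeffs) == len(filt.b_coeffs) == 3
# True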
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 6 |
"""simple docstring"""
def solution():
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
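    # Note (added): the published answer to Project Euler problem 19 is 171.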
| 260 | 0 |
def solution(limit=28123):
    """Returns the sum of all positive integers that cannot be written as the
    sum of two abundant numbers (Project Euler problem 23)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
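    # Example (added): 12 is the smallest abundant number, since its proper
    # divisors 1 + 2 + 3 + 4 + 6 = 16 exceed 12.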
| 357 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 257 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 96 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
lowerCAmelCase : str = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCAmelCase : int = parser.parse_args()
if args.model_type == "roberta":
lowerCAmelCase : List[str] = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase : List[str] = 'roberta'
elif args.model_type == "gpt2":
lowerCAmelCase : str = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCAmelCase : int = 'transformer'
lowerCAmelCase : Any = model.state_dict()
lowerCAmelCase : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCAmelCase : int = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCAmelCase : Any = f"""{prefix}.embeddings.{w}.weight"""
lowerCAmelCase : Any = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCAmelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
lowerCAmelCase : str = state_dict[param_name]
# Transformer Blocks #
lowerCAmelCase : Any = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCAmelCase : Optional[int] = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
lowerCAmelCase : int = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCAmelCase : Union[str, Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCAmelCase : int = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase : str = state_dict[f"""lm_head.dense.{w}"""]
lowerCAmelCase : int = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCAmelCase : Tuple = state_dict[f"""{prefix}.ln_f.{w}"""]
lowerCAmelCase : Optional[int] = state_dict['lm_head.weight']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
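
# Illustrative companion to the loop above: teacher layers [0, 2, 4, 7, 9, 11]
# land in consecutive student slots 0..5 in the distilled checkpoint. A minimal
# sketch of that key remapping (the parameter-name pattern is hypothetical):
def remap_layer_key(teacher_key: str, teacher_to_student: dict) -> str:
    """Rewrite an 'encoder.layer.<t>.' prefix to the matching student index."""
    for t_idx, s_idx in teacher_to_student.items():
        prefix = f"encoder.layer.{t_idx}."
        if teacher_key.startswith(prefix):
            return f"encoder.layer.{s_idx}." + teacher_key[len(prefix):]
    return teacher_key


_mapping = {t: s for s, t in enumerate([0, 2, 4, 7, 9, 11])}
assert remap_layer_key("encoder.layer.7.output.dense.weight", _mapping) == "encoder.layer.3.output.dense.weight"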
| 251 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ )-> int:
'''simple docstring'''
super().__init__()
UpperCamelCase = torchvision.models.resnetaaa(pretrained=A_ )
UpperCamelCase = list(model.children() )[:-2]
UpperCamelCase = nn.Sequential(*A_ )
UpperCamelCase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = self.pool(self.model(A_ ) )
UpperCamelCase = torch.flatten(A_ , start_dim=2 )
UpperCamelCase = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ , A_ , A_ , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = [json.loads(A_ ) for l in open(A_ )]
UpperCamelCase = os.path.dirname(A_ )
UpperCamelCase = tokenizer
UpperCamelCase = labels
UpperCamelCase = len(A_ )
UpperCamelCase = max_seq_length
UpperCamelCase = transforms
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.data )
def __getitem__( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=A_ ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase = sentence[0], sentence[1:-1], sentence[-1]
UpperCamelCase = sentence[: self.max_seq_length]
UpperCamelCase = torch.zeros(self.n_classes )
UpperCamelCase = 1
UpperCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
UpperCamelCase = self.transforms(A_ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def A_( A : Union[str, Any]):
UpperCamelCase = [len(row['sentence']) for row in batch]
UpperCamelCase , UpperCamelCase = len(A), max(A)
UpperCamelCase = torch.zeros(A , A , dtype=torch.long)
UpperCamelCase = torch.zeros(A , A , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(A , A)):
UpperCamelCase = input_row['sentence']
UpperCamelCase = 1
UpperCamelCase = torch.stack([row['image'] for row in batch])
UpperCamelCase = torch.stack([row['label'] for row in batch])
UpperCamelCase = torch.stack([row['image_start_token'] for row in batch])
UpperCamelCase = torch.stack([row['image_end_token'] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def A_( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A_( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
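
# Sketch of how the pieces above are typically wired together (all names here
# are hypothetical stand-ins for the obfuscated classes above, and the argument
# order of the dataset constructor is assumed, not guaranteed):
#
# from torch.utils.data import DataLoader
#
# dataset = JsonlDataset("train.jsonl", tokenizer, labels, transforms, max_seq_length)
# loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
# text, mask, images, img_start, img_end, targets = next(iter(loader))
#
# The collate function pads every sentence in the batch to the longest one and
# stacks images/labels into dense tensors, which is why it returns six tensors.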
| 251 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free path cells, 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_SCREAMING_SNAKE_CASE = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_SCREAMING_SNAKE_CASE = tuple[int, int]
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> None:
_lowerCAmelCase = pos_x
_lowerCAmelCase = pos_y
_lowerCAmelCase = (pos_y, pos_x)
_lowerCAmelCase = goal_x
_lowerCAmelCase = goal_y
_lowerCAmelCase = g_cost
_lowerCAmelCase = parent
_lowerCAmelCase = self.calculate_heuristic()
_lowerCAmelCase = self.g_cost + self.h_cost
def _snake_case ( self ) -> float:
_lowerCAmelCase = self.pos_x - self.goal_x
_lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , _lowerCAmelCase ) -> bool:
return self.f_cost < other.f_cost
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase )
_lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , __lowerCAmelCase )
_lowerCAmelCase = [self.start]
_lowerCAmelCase = []
_lowerCAmelCase = False
def _snake_case ( self ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
_lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__lowerCAmelCase )
self.closed_nodes.append(__lowerCAmelCase )
_lowerCAmelCase = self.get_successors(__lowerCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
_lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCAmelCase )
else:
self.open_nodes.append(__lowerCAmelCase )
return [self.start.pos]
def _snake_case ( self , _lowerCAmelCase ) -> list[Node]:
_lowerCAmelCase = []
for action in delta:
_lowerCAmelCase = parent.pos_x + action[1]
_lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) )
return successors
def _snake_case ( self , _lowerCAmelCase ) -> list[TPosition]:
_lowerCAmelCase = node
_lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase = current_node.parent
path.reverse()
return path
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> None:
_lowerCAmelCase = AStar(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = AStar(__lowerCAmelCase , __lowerCAmelCase )
_lowerCAmelCase = False
def _snake_case ( self ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
_lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
_lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase )
self.fwd_astar.closed_nodes.append(__lowerCAmelCase )
self.bwd_astar.closed_nodes.append(__lowerCAmelCase )
_lowerCAmelCase = current_bwd_node
_lowerCAmelCase = current_fwd_node
_lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
_lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__lowerCAmelCase )
else:
astar.open_nodes.append(__lowerCAmelCase )
return [self.fwd_astar.start.pos]
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase ) -> list[TPosition]:
_lowerCAmelCase = self.fwd_astar.retrace_path(__lowerCAmelCase )
_lowerCAmelCase = self.bwd_astar.retrace_path(__lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_SCREAMING_SNAKE_CASE = (0, 0)
_SCREAMING_SNAKE_CASE = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = AStar(init, goal)
_SCREAMING_SNAKE_CASE = a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = BidirectionalAStar(init, goal)
_SCREAMING_SNAKE_CASE = bd_a_star.search()
_SCREAMING_SNAKE_CASE = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
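
# Tiny numeric check of the two heuristics compared above (not part of the
# original script). On a 4-connected grid with unit step cost, Manhattan
# distance is admissible; Euclidean never exceeds Manhattan, so it is too:
#
#   dx, dy = 3, 4
#   abs(dx) + abs(dy)        # Manhattan -> 7
#   (dx**2 + dy**2) ** 0.5   # Euclidean -> 5.0
#
# A lower heuristic (Euclidean) expands more nodes but stays optimal; the
# HEURISTIC flag at the top simply switches between the two bounds.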
| 158 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
A : str = 0
A : Any = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free path cells, 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
A : Union[str, Any] = tuple[int, int]
class A :
'''simple docstring'''
def __init__( self : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : Node | None , ) -> None:
"""simple docstring"""
A__ = pos_x
A__ = pos_y
A__ = (pos_y, pos_x)
A__ = goal_x
A__ = goal_y
A__ = g_cost
A__ = parent
A__ = self.calculate_heuristic()
A__ = self.g_cost + self.h_cost
def a_ ( self : Dict ) -> float:
"""simple docstring"""
A__ = self.pos_x - self.goal_x
A__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__lowerCAmelCase ) + abs(__lowerCAmelCase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : int , __lowerCAmelCase : Node ) -> bool:
"""simple docstring"""
return self.f_cost < other.f_cost
class A :
'''simple docstring'''
def __init__( self : Union[str, Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> Tuple:
"""simple docstring"""
A__ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __lowerCAmelCase )
A__ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , __lowerCAmelCase )
A__ = [self.start]
A__ = []
A__ = False
def a_ ( self : List[str] ) -> list[TPosition]:
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__lowerCAmelCase )
self.closed_nodes.append(__lowerCAmelCase )
A__ = self.get_successors(__lowerCAmelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
A__ = self.open_nodes.pop(self.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__lowerCAmelCase )
else:
self.open_nodes.append(__lowerCAmelCase )
return [self.start.pos]
def a_ ( self : Optional[Any] , __lowerCAmelCase : Node ) -> list[Node]:
"""simple docstring"""
A__ = []
for action in delta:
A__ = parent.pos_x + action[1]
A__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__lowerCAmelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__lowerCAmelCase , __lowerCAmelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __lowerCAmelCase , ) )
return successors
def a_ ( self : List[Any] , __lowerCAmelCase : Node | None ) -> list[TPosition]:
"""simple docstring"""
A__ = node
A__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A__ = current_node.parent
path.reverse()
return path
class A :
'''simple docstring'''
def __init__( self : Optional[Any] , __lowerCAmelCase : TPosition , __lowerCAmelCase : TPosition ) -> None:
"""simple docstring"""
A__ = AStar(__lowerCAmelCase , __lowerCAmelCase )
A__ = AStar(__lowerCAmelCase , __lowerCAmelCase )
A__ = False
def a_ ( self : int ) -> list[TPosition]:
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
A__ = self.fwd_astar.open_nodes.pop(0 )
A__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__lowerCAmelCase , __lowerCAmelCase )
self.fwd_astar.closed_nodes.append(__lowerCAmelCase )
self.bwd_astar.closed_nodes.append(__lowerCAmelCase )
A__ = current_bwd_node
A__ = current_fwd_node
A__ = {
self.fwd_astar: self.fwd_astar.get_successors(__lowerCAmelCase ),
self.bwd_astar: self.bwd_astar.get_successors(__lowerCAmelCase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__lowerCAmelCase )
else:
# retrieve the best current path
A__ = astar.open_nodes.pop(
astar.open_nodes.index(__lowerCAmelCase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__lowerCAmelCase )
else:
astar.open_nodes.append(__lowerCAmelCase )
return [self.fwd_astar.start.pos]
def a_ ( self : List[str] , __lowerCAmelCase : Node , __lowerCAmelCase : Node ) -> list[TPosition]:
"""simple docstring"""
A__ = self.fwd_astar.retrace_path(__lowerCAmelCase )
A__ = self.bwd_astar.retrace_path(__lowerCAmelCase )
bwd_path.pop()
bwd_path.reverse()
A__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
A : Optional[int] = (0, 0)
A : int = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Dict = time.time()
A : Optional[Any] = AStar(init, goal)
A : Optional[int] = a_star.search()
A : Optional[int] = time.time() - start_time
print(F'''AStar execution time = {end_time:f} seconds''')
A : Dict = time.time()
A : Tuple = BidirectionalAStar(init, goal)
A : List[str] = bd_a_star.search()
A : List[Any] = time.time() - bd_start_time
print(F'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
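
# Sketch (not part of the original): the open list here is re-sorted and popped
# at index 0 on every expansion; a binary heap gives the same "smallest f_cost
# first" behaviour in O(log n) per operation, reusing Node.__lt__:
#
# import heapq
#
# heapq.heappush(open_heap, node)   # Node.__lt__ orders by f_cost
# best = heapq.heappop(open_heap)   # node with the smallest f_cost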
| 274 | 0 |
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self : List[Any] ,lowerCamelCase__ : list[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_a )
SCREAMING_SNAKE_CASE = [0] * len_array
if len_array > 0:
SCREAMING_SNAKE_CASE = array[0]
for i in range(1 ,_a ):
SCREAMING_SNAKE_CASE = self.prefix_sum[i - 1] + array[i]
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : int ,lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(_a )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
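
# Usage sketch for the class above (method names are the presumed originals
# behind the obfuscated identifiers: get_sum(start, end), contains_sum(target)):
#
# ps = PrefixSum([1, 2, 3, 4])   # internal prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # 2 + 3 + 4 == 9
# ps.contains_sum(5)             # True: the subarray [2, 3] sums to 5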
| 363 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__snake_case : str = AltDiffusionPipeline
__snake_case : int = TEXT_TO_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,)
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=lowerCamelCase__ ,set_alpha_to_one=lowerCamelCase__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=5002 ,)
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
SCREAMING_SNAKE_CASE = 77
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE__ ( self : int ,lowerCamelCase__ : Dict ,lowerCamelCase__ : int=0 ) -> Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A photo of an astronaut"""
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=lowerCamelCase__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=5002 ,)
# TODO: remove after fixing the non-deterministic text encoder
SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = text_encoder
SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type="""np""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" ,subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" ,scheduler=lowerCamelCase__ ,safety_checker=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = alt_pipe.to(lowerCamelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = alt_pipe([prompt] ,generator=lowerCamelCase__ ,num_inference_steps=2 ,output_type="""numpy""" )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
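
# All tests above share one pattern: take the bottom-right 3x3 corner of the
# last channel and compare it elementwise to hard-coded reference values. A
# small helper capturing that pattern (illustrative, not part of the suite):
def _assert_slice_close(image, expected_slice, atol=1e-2):
    """Compare the (0, -3:, -3:, -1) corner of an NHWC array to a reference."""
    actual = image[0, -3:, -3:, -1].flatten()
    assert np.abs(actual - np.asarray(expected_slice)).max() < atol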
| 193 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = StableDiffusionInstructPixaPixPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_lowerCAmelCase = CLIPTextModel(_snake_case )
_lowerCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(_snake_case ) ).to(_snake_case )
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase = Image.fromarray(np.uinta(_snake_case ) ).convert("""RGB""" )
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """french fries"""
_lowerCAmelCase = sd_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = [inputs["""prompt"""]] * 2
_lowerCAmelCase = np.array(inputs["""image"""] ).astype(np.floataa ) / 255.0
_lowerCAmelCase = torch.from_numpy(_snake_case ).unsqueeze(0 ).to(_snake_case )
_lowerCAmelCase = image / 2 + 0.5
_lowerCAmelCase = image.permute(0 , 3 , 1 , 2 )
_lowerCAmelCase = image.repeat(2 , 1 , 1 , 1 )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_lowerCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case )
_lowerCAmelCase = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = sd_pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
        _lowerCAmelCase = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(x ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline(**_snake_case )
_lowerCAmelCase = VaeImageProcessor(do_resize=_snake_case , do_normalize=_snake_case )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = pipe(**self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" ) )[0]
_lowerCAmelCase = components["""vae"""]
_lowerCAmelCase = self.get_dummy_inputs_by_type(_snake_case , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowerCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowerCAmelCase = pipe(**_snake_case )[0]
_lowerCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(_snake_case , 1e-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.manual_seed(_snake_case )
_lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
_lowerCAmelCase = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case )
_lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case ).images
_lowerCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = 0
def callback_fn(_snake_case , _snake_case , _snake_case ) -> None:
_lowerCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
_lowerCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
_lowerCAmelCase = False
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.floataa )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = self.get_inputs()
pipe(**_snake_case , callback=_snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_snake_case , torch_dtype=torch.floataa )
_lowerCAmelCase = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase = self.get_inputs()
_lowerCAmelCase = pipe(**_snake_case )
_lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowerCAmelCase = inputs["""image"""].resize((504, 504) )
_lowerCAmelCase = """timbrooks/instruct-pix2pix"""
_lowerCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_snake_case , safety_checker=_snake_case , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_lowerCAmelCase = pipe(**_snake_case )
_lowerCAmelCase = output.images[0]
_lowerCAmelCase = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
_lowerCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
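
# The callback test above uses the diffusers step-callback hook: a function
# called as callback(step, timestep, latents) every `callback_steps` steps.
# Minimal sketch of the same idea (exact call counts vary per scheduler):
#
# steps_seen = []
#
# def count_steps(step, timestep, latents):
#     steps_seen.append(step)
#
# pipe(**inputs, callback=count_steps, callback_steps=1)
# assert len(steps_seen) >= inputs["num_inference_steps"]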
| 82 |
'''simple docstring'''
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def A_ ( snake_case ):
return 1 / (1 + np.exp(-z ))
def A_ ( snake_case , snake_case ):
return (-y * np.log(snake_case ) - (1 - y) * np.log(1 - h )).mean()
def A_ ( snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Dict = np.dot(snake_case , snake_case )
return np.sum(y * scores - np.log(1 + np.exp(snake_case ) ) )
def A_ ( snake_case , snake_case , snake_case , snake_case=70000 ):
SCREAMING_SNAKE_CASE:List[str] = np.zeros(x.shape[1] )
for iterations in range(snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = np.dot(snake_case , snake_case )
SCREAMING_SNAKE_CASE:Dict = sigmoid_function(snake_case )
SCREAMING_SNAKE_CASE:List[str] = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE:Any = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE:Dict = np.dot(snake_case , snake_case )
SCREAMING_SNAKE_CASE:Union[str, Any] = sigmoid_function(snake_case )
SCREAMING_SNAKE_CASE:Dict = cost_function(snake_case , snake_case )
if iterations % 100 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
A_ = datasets.load_iris()
A_ = iris.data[:, :2]
A_ = (iris.target != 0) * 1
A_ = 0.1
A_ = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print("theta: ", theta) # printing the theta i.e our weights vector
def A_ ( snake_case ):
return sigmoid_function(
np.dot(snake_case , snake_case ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((A_) , (A_)) = (x[:, 0].min(), x[:, 0].max())
((A_) , (A_)) = (x[:, 1].min(), x[:, 1].max())
((A_) , (A_)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
A_ = np.c_[xxa.ravel(), xxa.ravel()]
A_ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
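
# Numerical sanity check of the gradient used in logistic_reg above (added
# illustration, not part of the original): for the mean cross-entropy
# J(theta) with h = sigmoid(x @ theta), the gradient is x.T @ (h - y) / m.
def _check_gradient():
    rng = np.random.default_rng(0)
    xs = rng.normal(size=(20, 2))
    ys = (rng.random(20) > 0.5).astype(float)
    th = rng.normal(size=2)

    def loss(t):
        h = 1 / (1 + np.exp(-(xs @ t)))
        return (-ys * np.log(h) - (1 - ys) * np.log(1 - h)).mean()

    h = 1 / (1 + np.exp(-(xs @ th)))
    analytic = xs.T @ (h - ys) / ys.size
    eps = 1e-6
    for i in range(2):
        step = np.zeros(2)
        step[i] = eps
        numeric = (loss(th + step) - loss(th - step)) / (2 * eps)
        assert abs(numeric - analytic[i]) < 1e-5  # central difference agrees

_check_gradient()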
| 139 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any]=7 , snake_case_ : int=3 , snake_case_ : Union[str, Any]=18 , snake_case_ : Union[str, Any]=30 , snake_case_ : Any=400 , snake_case_ : int=True , snake_case_ : List[Any]=None , snake_case_ : Any=True , ):
UpperCamelCase_: Dict = size if size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase_: List[Any] = parent
UpperCamelCase_: Tuple = batch_size
UpperCamelCase_: Union[str, Any] = num_channels
UpperCamelCase_: Optional[Any] = image_size
UpperCamelCase_: Dict = min_resolution
UpperCamelCase_: Optional[Any] = max_resolution
UpperCamelCase_: List[str] = do_resize
UpperCamelCase_: List[str] = size
UpperCamelCase_: Any = apply_ocr
def lowerCAmelCase__ ( self : Dict ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase__ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """apply_ocr""" ) )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
UpperCamelCase_: Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase__ ( self : List[Any] ):
pass
def lowerCAmelCase__ ( self : Union[str, Any] ):
# Initialize image_processing
UpperCamelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
UpperCamelCase_: List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , snake_case_ )
self.assertIsInstance(encoding.boxes , snake_case_ )
# Test batched
UpperCamelCase_: Optional[int] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self : List[str] ):
# Initialize image_processing
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase_: Dict = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self : int ):
# Initialize image_processing
UpperCamelCase_: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
UpperCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase_: Dict = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase__ ( self : Tuple ):
# with apply_OCR = True
UpperCamelCase_: Optional[int] = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase_: Optional[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
UpperCamelCase_: str = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
UpperCamelCase_: Optional[int] = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase_: int = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
UpperCamelCase_: Tuple = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , snake_case_ )
self.assertListEqual(encoding.boxes , snake_case_ )
# with apply_OCR = False
UpperCamelCase_: Any = LayoutLMvaImageProcessor(apply_ocr=snake_case_ )
UpperCamelCase_: List[str] = image_processing(snake_case_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
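
# LayoutLM-family models consume bounding boxes on a 0-1000 grid. When boxes
# arrive in pixel coordinates, the usual normalization looks like the sketch
# below (assumes (x0, y0, x1, y1) ordering, as in the lists above):
def _normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]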
| 357 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = """ybelkada/fonts"""
def A__ ( ) -> Dict:
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
requires_backends(lowerCamelCase , ["""torch"""] )
_check_torch_version()
UpperCamelCase_: Tuple = image_tensor.unsqueeze(0 )
UpperCamelCase_: Any = torch.nn.functional.unfold(lowerCamelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase_: int = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , lowerCamelCase , lowerCamelCase , -1 )
UpperCamelCase_: Any = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
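
# What torch.nn.functional.unfold does in the helper above, on a toy input
# (kept as a comment because torch is only optionally available in this file):
#
#   x = torch.arange(16.0).reshape(1, 1, 4, 4)
#   cols = torch.nn.functional.unfold(x, (2, 2), stride=(2, 2))
#   cols.shape      # torch.Size([1, 4, 4]): 4 values per 2x2 patch, 4 patches
#   cols[0, :, 0]   # tensor([0., 1., 4., 5.]) -- the top-left window
#
# The reshape/permute that follows rearranges those columns into a
# (rows, cols, channels * patch_height * patch_width) grid of flattened patches.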
def A__ ( lowerCamelCase , lowerCamelCase = 36 , lowerCamelCase = "black" , lowerCamelCase = "white" , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = 5 , lowerCamelCase = None , lowerCamelCase = None , ) -> Image.Image:
requires_backends(lowerCamelCase , """vision""" )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase_: List[str] = textwrap.TextWrapper(width=80 )
UpperCamelCase_: Optional[int] = wrapper.wrap(text=lowerCamelCase )
UpperCamelCase_: List[str] = """\n""".join(lowerCamelCase )
if font_bytes is not None and font_path is None:
UpperCamelCase_: List[Any] = io.BytesIO(lowerCamelCase )
elif font_path is not None:
UpperCamelCase_: List[Any] = font_path
else:
UpperCamelCase_: Tuple = hf_hub_download(lowerCamelCase , """Arial.TTF""" )
UpperCamelCase_: Optional[Any] = ImageFont.truetype(lowerCamelCase , encoding="""UTF-8""" , size=lowerCamelCase )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase_: str = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , lowerCamelCase ) )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = temp_draw.textbbox((0, 0) , lowerCamelCase , lowerCamelCase )
# Create the actual image with a bit of padding around the text.
UpperCamelCase_: Optional[int] = text_width + left_padding + right_padding
UpperCamelCase_: List[str] = text_height + top_padding + bottom_padding
UpperCamelCase_: Union[str, Any] = Image.new("""RGB""" , (image_width, image_height) , lowerCamelCase )
UpperCamelCase_: Optional[Any] = ImageDraw.Draw(lowerCamelCase )
draw.text(xy=(left_padding, top_padding) , text=lowerCamelCase , fill=lowerCamelCase , font=lowerCamelCase )
return image
def A__ ( lowerCamelCase , lowerCamelCase , **lowerCamelCase ) -> List[str]:
requires_backends(lowerCamelCase , """vision""" )
# Convert to PIL image if necessary
UpperCamelCase_: List[str] = to_pil_image(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = render_text(lowerCamelCase , **lowerCamelCase )
UpperCamelCase_: Tuple = max(header_image.width , image.width )
UpperCamelCase_: Tuple = int(image.height * (new_width / image.width) )
UpperCamelCase_: Dict = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase_: str = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase_: Optional[Any] = to_numpy_array(lowerCamelCase )
if infer_channel_dimension_format(lowerCamelCase ) == ChannelDimension.LAST:
UpperCamelCase_: Tuple = to_channel_dimension_format(lowerCamelCase , ChannelDimension.LAST )
return new_image
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = ["""flattened_patches"""]
def __init__( self : int , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : int = 2048 , snake_case_ : bool = False , **snake_case_ : Any , ):
super().__init__(**snake_case_ )
UpperCamelCase_: int = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
UpperCamelCase_: Tuple = do_normalize
UpperCamelCase_: List[Any] = do_convert_rgb
UpperCamelCase_: Tuple = max_patches
UpperCamelCase_: Tuple = is_vqa
def lowerCAmelCase__ ( self : int , snake_case_ : np.ndarray , snake_case_ : int , snake_case_ : dict , **snake_case_ : Tuple ):
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
UpperCamelCase_: int = to_channel_dimension_format(snake_case_ , ChannelDimension.FIRST )
UpperCamelCase_: List[str] = torch.from_numpy(snake_case_ )
UpperCamelCase_, UpperCamelCase_: List[Any] = patch_size["""height"""], patch_size["""width"""]
UpperCamelCase_, UpperCamelCase_: Tuple = get_image_size(snake_case_ )
        # Maximize the resize scale such that the resulting grid of patches
        # (rows * columns) just fits within max_patches.
UpperCamelCase_: List[Any] = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_: Any = max(min(math.floor(scale * image_height / patch_height ) , snake_case_ ) , 1 )
UpperCamelCase_: List[str] = max(min(math.floor(scale * image_width / patch_width ) , snake_case_ ) , 1 )
UpperCamelCase_: int = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_: Optional[Any] = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_: str = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=snake_case_ , antialias=snake_case_ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_: List[str] = torch_extract_patches(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase_: List[Any] = patches.shape
UpperCamelCase_: List[str] = patches_shape[1]
UpperCamelCase_: Optional[Any] = patches_shape[2]
UpperCamelCase_: List[str] = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_: Union[str, Any] = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_: Optional[Any] = torch.arange(snake_case_ ).reshape([rows, 1] ).repeat(1 , snake_case_ ).reshape([rows * columns, 1] )
UpperCamelCase_: Optional[int] = torch.arange(snake_case_ ).reshape([1, columns] ).repeat(snake_case_ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        UpperCamelCase_: Union[str, Any] = row_ids.to(torch.float32 )
        UpperCamelCase_: str = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_: Optional[Any] = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_: Tuple = torch.nn.functional.pad(snake_case_ , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_: List[Any] = to_numpy_array(snake_case_ )
return result
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : np.ndarray , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Tuple ):
        if image.dtype == np.uint8:
            UpperCamelCase_: List[str] = image.astype(np.float32 )
# take mean across the whole `image`
UpperCamelCase_: str = np.mean(snake_case_ )
UpperCamelCase_: str = np.std(snake_case_ )
UpperCamelCase_: str = max(snake_case_ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , **snake_case_ )
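    # Note on the std floor above: a constant image has a standard deviation of
    # 0, so the std is clamped from below at 1 / sqrt(number of elements) before
    # dividing; this mirrors TensorFlow's per_image_standardization behaviour.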
def lowerCAmelCase__ ( self : str , snake_case_ : ImageInput , snake_case_ : Optional[str] = None , snake_case_ : bool = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[int] = None , snake_case_ : Optional[Dict[str, int]] = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : Union[str, Any] , ):
UpperCamelCase_: Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_: Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_: Optional[Any] = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_: Optional[int] = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_: Tuple = self.is_vqa
if kwargs.get("""data_format""" , snake_case_ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
UpperCamelCase_: Dict = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_: str = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_: Union[str, Any] = [to_numpy_array(snake_case_ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
UpperCamelCase_: List[Any] = kwargs.pop("""font_bytes""" , snake_case_ )
UpperCamelCase_: List[Any] = kwargs.pop("""font_path""" , snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: str = [header_text] * len(snake_case_ )
UpperCamelCase_: str = [
render_header(snake_case_ , header_text[i] , font_bytes=snake_case_ , font_path=snake_case_ )
for i, image in enumerate(snake_case_ )
]
if do_normalize:
UpperCamelCase_: Union[str, Any] = [self.normalize(image=snake_case_ ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_: str = [
self.extract_flattened_patches(image=snake_case_ , max_patches=snake_case_ , patch_size=snake_case_ )
for image in images
]
# create attention mask in numpy
        UpperCamelCase_: List[Any] = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
UpperCamelCase_: Optional[Any] = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=snake_case_ )
return encoded_outputs
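# Illustrative sketch (separate from the class above): how the 1-based
# (row, column) coordinates prepended to every flattened patch are built.
# With rows=2 and columns=3:
#
#     import torch
#     row_ids = torch.arange(2).reshape(2, 1).repeat(1, 3).reshape(6, 1) + 1
#     col_ids = torch.arange(3).reshape(1, 3).repeat(2, 1).reshape(6, 1) + 1
#     torch.cat([row_ids, col_ids], -1)
#     # tensor([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])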
| 223 | 0 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase : Optional[Any] = TypeVar('T')
class a ( Generic[T] ):
def __init__( self , _lowerCamelCase = True ):
lowercase = {} # dictionary of lists
lowercase = directed
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
self.adj_list[destination_vertex].append(_lowerCamelCase )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
lowercase = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_lowerCamelCase )
lowercase = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex. Also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
else:
lowercase = [destination_vertex]
lowercase = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
lowercase = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex.
else:
lowercase = [destination_vertex]
lowercase = []
return self
def __repr__( self ):
return pformat(self.adj_list )
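# Self-contained illustration of the undirected add-edge behaviour documented
# above, with plain names (the class itself has its attribute assignments
# mangled by the renaming, so this restates the intent rather than calling it):
adjacency: dict = {}

def _add_edge_undirected(source_vertex, destination_vertex):
    adjacency.setdefault(source_vertex, []).append(destination_vertex)
    adjacency.setdefault(destination_vertex, []).append(source_vertex)

_add_edge_undirected("a", "b")
_add_edge_undirected("a", "c")
assert adjacency == {"a": ["b", "c"], "b": ["a"], "c": ["a"]}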
| 220 |
"""simple docstring"""
_UpperCamelCase : List[str] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
_UpperCamelCase : str = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def _SCREAMING_SNAKE_CASE ( __snake_case : float , __snake_case : str , __snake_case : str ):
'''simple docstring'''
lowercase = from_type.lower().strip('s' )
lowercase = to_type.lower().strip('s' )
lowercase = UNIT_SYMBOL.get(__snake_case , __snake_case )
lowercase = UNIT_SYMBOL.get(__snake_case , __snake_case )
if from_sanitized not in METRIC_CONVERSION:
lowercase = (
f'Invalid \'from_type\' value: {from_type!r}.\n'
f'Conversion abbreviations are: {", ".join(__snake_case )}'
)
raise ValueError(__snake_case )
if to_sanitized not in METRIC_CONVERSION:
lowercase = (
f'Invalid \'to_type\' value: {to_type!r}.\n'
f'Conversion abbreviations are: {", ".join(__snake_case )}'
)
raise ValueError(__snake_case )
lowercase = METRIC_CONVERSION[from_sanitized]
lowercase = METRIC_CONVERSION[to_sanitized]
lowercase = 1
if from_exponent > to_exponent:
lowercase = from_exponent - to_exponent
else:
lowercase = -(to_exponent - from_exponent)
return value * pow(10 , __snake_case )
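# Worked example of the exponent arithmetic above: converting 4 kilometers to
# megametres uses exponents 3 (km) and 6 (Mm), so the exponent difference is
# -(6 - 3) = -3 and the result is 4 * 10**-3 = 0.004 Mm.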
if __name__ == "__main__":
from doctest import testmod
testmod()
| 220 | 1 |
def lowerCAmelCase_ ( __lowerCamelCase ):
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty" )
__snake_case : List[str] = sum(__lowerCamelCase ) / len(__lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__lowerCamelCase )
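# Quick check of the mean-absolute-deviation definition with plain code
# (independent of the helper above, whose intermediate names were mangled):
_nums = [1, 2, 3, 4]
_avg = sum(_nums) / len(_nums)  # 2.5
assert sum(abs(x - _avg) for x in _nums) / len(_nums) == 1.0  # (1.5+0.5+0.5+1.5)/4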
if __name__ == "__main__":
import doctest
doctest.testmod()
| 134 |
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
return x if y == 0 else greatest_common_divisor(__lowerCamelCase , x % y )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
return (x * y) // greatest_common_divisor(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase = 2_0 ):
__snake_case : Optional[Any] = 1
for i in range(1 , n + 1 ):
__snake_case : Any = lcm(__lowerCamelCase , __lowerCamelCase )
return g
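# Cross-check with the standard library (Python 3.9+), independent of the
# helpers above: the smallest number evenly divisible by 1..20 is 232792560.
from functools import reduce
from math import lcm as _stdlib_lcm

assert reduce(_stdlib_lcm, range(1, 21)) == 232792560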
if __name__ == "__main__":
print(f'''{solution() = }''')
| 134 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
@property
def __A ( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self : List[str] ):
A_ = ort.SessionOptions()
A_ = False
return options
def __A ( self : Dict ):
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
A_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" )
# using the PNDM scheduler by default
A_ = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
A_ = "A red cat sitting on a park bench"
A_ = np.random.RandomState(0 )
A_ = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCAmelCase , output_type="np" , )
A_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2 | 312 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
loss.backward() | 312 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_UpperCAmelCase : List[str] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 354 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any=7 , UpperCAmelCase : int=3 , UpperCAmelCase : Optional[Any]=18 , UpperCAmelCase : str=30 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=None , UpperCAmelCase : str=True , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , ) -> Tuple:
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'shortest_edge': 18}
lowerCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
lowerCamelCase__ : Optional[Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : str = num_channels
lowerCamelCase__ : Optional[int] = image_size
lowerCamelCase__ : List[str] = min_resolution
lowerCamelCase__ : Union[str, Any] = max_resolution
lowerCamelCase__ : Optional[int] = do_resize
lowerCamelCase__ : int = size
lowerCamelCase__ : Optional[int] = do_center_crop
lowerCamelCase__ : str = crop_size
lowerCamelCase__ : Optional[Any] = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : Union[str, Any] = image_std
def A_ ( self : Any ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = LevitImageProcessor if is_vision_available() else None
def A_ ( self : Tuple ) -> Tuple:
lowerCamelCase__ : str = LevitImageProcessingTester(self )
@property
def A_ ( self : Tuple ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Optional[int] ) -> int:
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'do_center_crop' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'size' ) )
def A_ ( self : List[Any] ) -> Optional[int]:
lowerCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
lowerCamelCase__ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def A_ ( self : str ) -> str:
pass
def A_ ( self : Optional[int] ) -> List[Any]:
# Initialize image_processing
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase__ : Optional[Any] = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A_ ( self : List[str] ) -> List[Any]:
# Initialize image_processing
lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A_ ( self : str ) -> int:
# Initialize image_processing
lowerCamelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
lowerCamelCase__ : Any = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 45 | 0 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
_a : str = StableUnCLIPPipeline
_a : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_a : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_a : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_a : Optional[Any] = False
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 3_2
__lowerCAmelCase = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=_A , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
__lowerCAmelCase = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=_A , num_layers=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=_A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__lowerCAmelCase = StableUnCLIPImageNormalizer(embedding_dim=_A )
__lowerCAmelCase = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=_A , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) )
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=_A , layers_per_block=1 , upcast_attention=_A , use_linear_projection=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=_A , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL()
__lowerCAmelCase = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = pipe("anime turle" , generator=_A , output_type="np" )
__lowerCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        __lowerCAmelCase = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
__lowerCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
| 92 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Tuple ):
'''simple docstring'''
debug_launcher(test_script.main )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 181 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : int = TypeVar("""DatasetType""", Dataset, IterableDataset)
def lowerCamelCase__ ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(A )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
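# For reference, this helper backs the public `datasets.interleave_datasets`
# API. A typical call looks like the following (values are illustrative):
#
#     from datasets import Dataset, interleave_datasets
#     d1 = Dataset.from_dict({"x": [0, 1, 2]})
#     d2 = Dataset.from_dict({"x": [10, 11, 12]})
#     mixed = interleave_datasets(
#         [d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted"
#     )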
def lowerCamelCase__ ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
'''is an empty dataset dictionary.''' )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(A )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(A ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.""" )
if i == 0:
UpperCAmelCase , UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
| 364 |
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def lowerCamelCase__ ( A : int , A : int , A : int , A : int , A : int , A : int ):
'''simple docstring'''
if (ksize % 2) == 0:
UpperCAmelCase = ksize + 1
    UpperCAmelCase = np.zeros((ksize, ksize) , dtype=np.float32 )
# each value
for y in range(A ):
for x in range(A ):
# distance from center
UpperCAmelCase = x - ksize // 2
UpperCAmelCase = y - ksize // 2
# degree to radiant
UpperCAmelCase = theta / 1_80 * np.pi
UpperCAmelCase = np.cos(_theta )
UpperCAmelCase = np.sin(_theta )
# get kernel x
UpperCAmelCase = cos_theta * px + sin_theta * py
# get kernel y
UpperCAmelCase = -sin_theta * px + cos_theta * py
# fill kernel
UpperCAmelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
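# The kernel above implements the real-valued Gabor function
#     g(x, y) = exp(-(x'**2 + gamma**2 * y'**2) / (2 * sigma**2))
#               * cos(2 * pi * x' / lambd + psi)
# where (x', y') are the pixel offsets from the kernel centre rotated by
# theta (given in degrees and converted to radians above):
#     x' =  cos(theta) * x + sin(theta) * y
#     y' = -sin(theta) * x + cos(theta) * y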
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_lowercase : Tuple = imread("""../image_data/lena.jpg""")
# turn image in gray scale value
_lowercase : int = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_lowercase : List[str] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_lowercase : List[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
_lowercase : Optional[int] = out / out.max() * 255
    _lowercase : Optional[int] = out.astype(np.uint8)
imshow("""Original""", gray)
imshow("""Gabor filter with 20x20 mask and 6 directions""", out)
waitKey(0)
| 91 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
a_ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
for attribute in key.split("."):
SCREAMING_SNAKE_CASE : str = getattr(_a , _a)
if weight_type is not None:
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_a , _a).shape
else:
SCREAMING_SNAKE_CASE : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
SCREAMING_SNAKE_CASE : str = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : List[Any] = value
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : List[str] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : str = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : str = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : Any = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
# special case since naming is very similar
continue
SCREAMING_SNAKE_CASE : Optional[Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : str = name.split(_a)[0].split(".")[-2]
SCREAMING_SNAKE_CASE : int = mapped_key.replace("*" , _a)
if "weight_g" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : Optional[Any] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE : Tuple = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : Optional[int] = "weight"
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
set_recursively(_a , _a , _a , _a , _a)
continue
if not is_used:
unused_weights.append(_a)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase__ ( _a , _a , _a , _a , _a):
SCREAMING_SNAKE_CASE : Optional[Any] = full_name.split("conv_layers.")[-1]
SCREAMING_SNAKE_CASE : List[Any] = name.split(".")
SCREAMING_SNAKE_CASE : int = int(items[0])
SCREAMING_SNAKE_CASE : Union[str, Any] = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Any = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.")
SCREAMING_SNAKE_CASE : Optional[int] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.")
SCREAMING_SNAKE_CASE : List[str] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(_a)
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a=None , _a=None , _a=True):
if config_path is not None:
SCREAMING_SNAKE_CASE : str = UniSpeechSatConfig.from_pretrained(_a)
else:
SCREAMING_SNAKE_CASE : int = UniSpeechSatConfig()
SCREAMING_SNAKE_CASE : int = ""
if is_finetuned:
SCREAMING_SNAKE_CASE : Any = UniSpeechSatForCTC(_a)
else:
SCREAMING_SNAKE_CASE : Optional[int] = UniSpeechSatForPreTraining(_a)
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
SCREAMING_SNAKE_CASE : List[str] = model[0].eval()
recursively_load_weights(_a , _a)
hf_wavavec.save_pretrained(_a)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 76 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , a : int , a : Optional[int]=13 , a : Optional[int]=3 , a : int=224 , a : Optional[int]=30 , a : int=400 , a : Union[str, Any]=True , a : int=None , a : Tuple=True , a : Tuple=[0.5, 0.5, 0.5] , a : Optional[int]=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : int = batch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Tuple = min_resolution
SCREAMING_SNAKE_CASE : str = max_resolution
SCREAMING_SNAKE_CASE : int = do_resize
SCREAMING_SNAKE_CASE : List[Any] = size
SCREAMING_SNAKE_CASE : int = do_normalize
SCREAMING_SNAKE_CASE : Tuple = image_mean
SCREAMING_SNAKE_CASE : Tuple = image_std
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class _UpperCamelCase ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ViTImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = EfficientFormerImageProcessorTester(self )
@property
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE : List[str] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : str = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE : int = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Any = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , ) | 76 | 1 |
def _lowercase ( _UpperCAmelCase = 10**9 ) -> int:
lowerCamelCase =1
lowerCamelCase =2
lowerCamelCase =0
lowerCamelCase =0
lowerCamelCase =0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowerCamelCase =2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
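# Context, for the reader: this appears to be Project Euler problem 94
# ("almost equilateral triangles"). Triangles with sides (a, a, a +/- 1) and
# integral area correspond to solutions of a Pell-type equation; the loop
# walks those solutions, and the alternating `2 * value +/- 2` expression is
# the perimeter contributed by each one.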
if __name__ == "__main__":
print(F"{solution() = }")
| 262 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowercase ( ) -> str:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(_UpperCAmelCase ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ) -> Union[str, Any]:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def _lowercase ( ) -> int:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(_UpperCAmelCase ):
http_head("""https://huggingface.co""" )
| 262 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __magic_name__ ( unittest.TestCase):
def __init__( self : List[str] , lowercase_ : str , lowercase_ : Optional[int]=13 , lowercase_ : Any=7 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : str=True , lowercase_ : str=99 , lowercase_ : Optional[Any]=32 , lowercase_ : Optional[int]=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[Any]=37 , lowercase_ : List[Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : int=512 , lowercase_ : Optional[Any]=16 , lowercase_ : Tuple=2 , lowercase_ : Tuple=0.02 , lowercase_ : Dict=4 , ):
lowercase_ : Dict = parent
lowercase_ : Dict = batch_size
lowercase_ : Union[str, Any] = seq_length
lowercase_ : Dict = is_training
lowercase_ : Optional[int] = use_attention_mask
lowercase_ : Tuple = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : str = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : int = num_hidden_layers
lowercase_ : List[str] = num_attention_heads
lowercase_ : List[str] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : int = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Any = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : Any = num_choices
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[str] = None
if self.use_attention_mask:
lowercase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Union[str, Any] = None
if self.use_token_type_ids:
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Tuple = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Dict = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Dict = config_and_inputs
lowercase_ : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs
lowercase_ : List[str] = True
lowercase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = True
UpperCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxRobertaModelTester(self )
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""roberta-base""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 239 | '''simple docstring'''
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad , end_pointb : Pointad ) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( point_a : Pointad , point_b : Pointad , point_c : Pointad , accuracy : int = 10 ) -> bool:
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
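# A quick usage sketch (hypothetical points, not part of the original module):
if __name__ == "__main__":
    # (1, 2, 3), (2, 4, 6) and (3, 6, 9) all lie on the line x = t, y = 2t, z = 3t.
    print(are_collinear((1, 2, 3) , (2, 4, 6) , (3, 6, 9) ))  # True
    print(are_collinear((1, 2, 3) , (2, 4, 6) , (3, 6, 10) )) # False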
| 239 | 1 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def lowerCamelCase ( ) -> List[Any]:
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
class a_ ( TestCasePlus ):
'''simple docstring'''
    def setup( self ) -> None:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check( self , args ) -> Union[str, Any]:
        '''simple docstring'''
        n_gpu = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
            args.insert(0 , 'run_glue_deebert.py' )
            with patch.object(sys , 'argv' , args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value , 0.6_66 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train( self ) -> Optional[int]:
'''simple docstring'''
        train_args = '\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            '.split()
        self.run_and_check(train_args )
        eval_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(eval_args )
        entropy_eval_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(entropy_eval_args )
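# A minimal self-contained sketch of the sys.argv patching pattern used by
# run_and_check above (toy parser; the names below are illustrative only):
def _argv_patching_demo():
    demo_parser = argparse.ArgumentParser()
    demo_parser.add_argument('--seed' , type=int )
    with patch.object(sys , 'argv' , ['prog.py', '--seed', '42'] ):
        return demo_parser.parse_args().seed  # -> 42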
| 14 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
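# For reference, a self-contained sketch of the fused-QKV split performed in
# the state-dict conversion above: the original checkpoint stores query/key/
# value as a single (3 * dim, dim) matrix that maps onto three separate
# (dim, dim) projections (toy sizes, not part of the conversion itself):
def _split_qkv_demo(dim=4):
    in_proj_weight = torch.randn(3 * dim , dim )
    q_w = in_proj_weight[:dim, :]
    k_w = in_proj_weight[dim : dim * 2, :]
    v_w = in_proj_weight[-dim:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)
    return q_w, k_w, v_w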
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
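# For context: _LazyModule implements the same contract as PEP 562's
# module-level __getattr__. A runnable toy version of that idea
# (hypothetical, independent of transformers):
import types
def _make_lazy_module(name, loaders):
    module = types.ModuleType(name)
    def __getattr__(attr):
        if attr in loaders:
            value = loaders[attr]()        # import/build only on first access
            setattr(module, attr, value)   # cache for later lookups
            return value
        raise AttributeError(f"module {name!r} has no attribute {attr!r}")
    module.__getattr__ = __getattr__       # PEP 562 hook, honoured for modules
    return module
_lazy_demo = _make_lazy_module("demo", {"answer": lambda: 42})
assert _lazy_demo.answer == 42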
| 55 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Whether tp freeze the encoder."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class snake_case :
"""simple docstring"""
_lowerCamelCase = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_lowerCamelCase = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
_lowerCamelCase = field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
_lowerCamelCase = field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowerCamelCase = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
_lowerCamelCase = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
_lowerCamelCase = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Source language id for translation."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "Target language id for translation."} )
_lowerCamelCase = field(default=lowercase , metadata={"help": "# num_beams to use for evaluation."} )
_lowerCamelCase = field(
default=lowercase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ):
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , F'''{split}_results.json''' ) )
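# A plausible sketch of the save_json helper imported from utils above,
# assuming it simply serializes a dict to disk (hypothetical body; the real
# helper lives in the example's utils module):
def _save_json_sketch(content, path, indent=4):
    import json
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent )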
def __snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_args_into_dataclasses()
check_output_dir(UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
assert hasattr(UpperCAmelCase_ , UpperCAmelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
lowerCamelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(UpperCAmelCase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowerCamelCase_ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(UpperCAmelCase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowerCamelCase_ = SeqaSeqDataset
# Get datasets
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowerCamelCase_ = (
dataset_class(
UpperCAmelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowerCamelCase_ = (
build_compute_metrics_fn(data_args.task , UpperCAmelCase_ ) if training_args.predict_with_generate else None
)
lowerCamelCase_ = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , data_args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , data_collator=SeqaSeqDataCollator(
UpperCAmelCase_ , UpperCAmelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
lowerCamelCase_ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
lowerCamelCase_ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowerCamelCase_ = train_result.metrics
lowerCamelCase_ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCamelCase_ = trainer.evaluate(metric_key_prefix="val" )
lowerCamelCase_ = data_args.n_val
lowerCamelCase_ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCamelCase_ = trainer.predict(test_dataset=UpperCAmelCase_ , metric_key_prefix="test" )
lowerCamelCase_ = test_output.metrics
lowerCamelCase_ = data_args.n_test
if trainer.is_world_process_zero():
lowerCamelCase_ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , UpperCAmelCase_ , training_args.output_dir )
all_metrics.update(UpperCAmelCase_ )
if training_args.predict_with_generate:
lowerCamelCase_ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
lowerCamelCase_ = lmap(str.strip , UpperCAmelCase_ )
write_txt_file(UpperCAmelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(UpperCAmelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def __snake_case ( UpperCAmelCase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
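# Plausible sketches of the freezing helpers imported from utils at the top of
# this script, assuming the usual contracts their names and call sites imply:
def _freeze_params_sketch(model):
    for par in model.parameters():
        par.requires_grad = False
def _assert_all_frozen_sketch(model):
    assert not any(par.requires_grad for par in model.parameters() )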
| 55 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ['BlenderbotSmallTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
def lowerCAmelCase_( lowercase_ : List[str] , lowercase_ : Optional[int] ) -> int:
_lowerCamelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase_( lowercase_ : List[Any] , lowercase_ : List[str] ) -> List[str]:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
_lowerCamelCase = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
_lowerCamelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
_lowerCamelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
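# Toy usage of rename_key (hypothetical keys, mirroring the mapping above):
def _rename_key_demo():
    state = {'encoder.deit.norm.weight': 1.0}
    rename_key(state , 'encoder.deit.norm.weight' , 'encoder.layernorm.weight' )
    return state  # {'encoder.layernorm.weight': 1.0}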
def lowerCAmelCase_( lowercase_ : Union[str, Any] ) -> Union[str, Any]:
if "handwritten" in checkpoint_url:
_lowerCamelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCamelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
_lowerCamelCase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase_( lowercase_ : Any , lowercase_ : Dict ) -> List[str]:
_lowerCamelCase = ViTConfig(image_size=3_84 , qkv_bias=lowercase_ )
_lowerCamelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
_lowerCamelCase = 7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
_lowerCamelCase = 10_24
_lowerCamelCase = 40_96
_lowerCamelCase = 24
_lowerCamelCase = 16
_lowerCamelCase = 10_24
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
_lowerCamelCase = False
_lowerCamelCase = '''relu'''
_lowerCamelCase = 10_24
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
# load HuggingFace model
_lowerCamelCase = ViTModel(lowercase_ , add_pooling_layer=lowercase_ )
_lowerCamelCase = TrOCRForCausalLM(lowercase_ )
_lowerCamelCase = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
# load state_dict of original model, rename some keys
_lowerCamelCase = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' , check_hash=lowercase_ )['''model''']
_lowerCamelCase = create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
_lowerCamelCase = state_dict.pop(lowercase_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
_lowerCamelCase = val
else:
_lowerCamelCase = val
# load state dict
model.load_state_dict(lowercase_ )
# Check outputs on an image
_lowerCamelCase = ViTImageProcessor(size=encoder_config.image_size )
_lowerCamelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
_lowerCamelCase = TrOCRProcessor(lowercase_ , lowercase_ )
_lowerCamelCase = processor(images=prepare_img(lowercase_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
_lowerCamelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
_lowerCamelCase = model(pixel_values=lowercase_ , decoder_input_ids=lowercase_ )
_lowerCamelCase = outputs.logits
_lowerCamelCase = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
_lowerCamelCase = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowercase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 73 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_jukebox''': [
'''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''JukeboxConfig''',
'''JukeboxPriorConfig''',
'''JukeboxVQVAEConfig''',
],
'''tokenization_jukebox''': ['''JukeboxTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''JukeboxModel''',
'''JukeboxPreTrainedModel''',
'''JukeboxVQVAE''',
'''JukeboxPrior''',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ['''MobileNetV2FeatureExtractor''']
    _import_structure["image_processing_mobilenet_v2"] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 257 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    model_type = 'blip_2_vision_model'
def __init__( self : Dict, a_ : int=1408, a_ : Any=6144, a_ : Union[str, Any]=39, a_ : Union[str, Any]=16, a_ : List[str]=224, a_ : Optional[int]=14, a_ : Tuple="gelu", a_ : Tuple=0.00_001, a_ : Dict=0.0, a_ : Union[str, Any]=1e-1_0, a_ : int=True, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = patch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = hidden_act
UpperCamelCase__ = qkv_bias
@classmethod
def lowercase_ ( cls : Any, a_ : Union[str, os.PathLike], **a_ : Any ):
"""simple docstring"""
cls._set_token_in_kwargs(a_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(a_, **a_ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCamelCase__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a_, **a_ )
class BlipaQFormerConfig ( PretrainedConfig ):
    model_type = 'blip_2_qformer'
def __init__( self : Tuple, a_ : Tuple=3_0522, a_ : Any=768, a_ : Union[str, Any]=12, a_ : Optional[int]=12, a_ : Union[str, Any]=3072, a_ : List[Any]="gelu", a_ : Dict=0.1, a_ : Dict=0.1, a_ : List[Any]=512, a_ : Any=0.02, a_ : Optional[Any]=1e-1_2, a_ : str=0, a_ : List[str]="absolute", a_ : List[str]=2, a_ : List[Any]=1408, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(pad_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = cross_attention_frequency
UpperCamelCase__ = encoder_hidden_size
@classmethod
def lowercase_ ( cls : Optional[int], a_ : Union[str, os.PathLike], **a_ : List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(a_ )
UpperCamelCase__ , UpperCamelCase__ = cls.get_config_dict(a_, **a_ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCamelCase__ = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a_, **a_ )
class BlipaConfig ( PretrainedConfig ):
    model_type = 'blip-2'
    is_composition = True
def __init__( self : Dict, a_ : List[Any]=None, a_ : Optional[Any]=None, a_ : Dict=None, a_ : Tuple=32, **a_ : Optional[int] ):
"""simple docstring"""
super().__init__(**a_ )
if vision_config is None:
UpperCamelCase__ = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
UpperCamelCase__ = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
UpperCamelCase__ = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCamelCase__ = BlipaVisionConfig(**a_ )
UpperCamelCase__ = BlipaQFormerConfig(**a_ )
UpperCamelCase__ = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCamelCase__ = CONFIG_MAPPING[text_model_type](**a_ )
UpperCamelCase__ = self.text_config.tie_word_embeddings
UpperCamelCase__ = self.text_config.is_encoder_decoder
UpperCamelCase__ = num_query_tokens
UpperCamelCase__ = self.vision_config.hidden_size
UpperCamelCase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase__ = 1.0
UpperCamelCase__ = 0.02
@classmethod
def lowercase_ ( cls : int, a_ : BlipaVisionConfig, a_ : BlipaQFormerConfig, a_ : PretrainedConfig, **a_ : int, ):
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **a_, )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 31 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status( target_runners : list , token : str ) -> Union[str, Any]:
    '''simple docstring'''
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str( values : str ) -> Optional[Any]:
        '''simple docstring'''
        return values.split("," )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 31 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file, 'w' ) as fp:
            fp.write('\n'.join(merges ) )
    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file )
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens, bpe_tokens )
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ), input_bpe_tokens )
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        text = tokenizer.encode('sequence builders', add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
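# For context: with the toy merges above, "lower" tokenizes greedily as
# l+o -> lo, lo+w -> low, then e+r</w> -> er</w>, yielding ['low', 'er</w>'].
# A compact sketch of a single BPE merge step (hypothetical helper, not part
# of the tokenizer under test):
def _apply_merge(symbols, pair):
    out, i = [], 0
    while i < len(symbols):
        if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == pair:
            out.append(symbols[i] + symbols[i + 1])  # merge the ranked pair
            i += 2
        else:
            out.append(symbols[i])
            i += 1
    return out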
| 251 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class _a ( unittest.TestCase ):
'''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path('''tests/test_configs''' )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCamelCase_ ( cls ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCamelCase_ ( self ):
'''simple docstring'''
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=A ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(A ), self.test_file_path], env=os.environ.copy() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy() )
class _a ( unittest.TestCase ):
'''simple docstring'''
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )
| 251 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Counts the distinct terms in the sequence a**b for 2 <= a, b <= n."""
    collect_powers = set()

    max_base = n + 1  # maximum limit
    for a in range(2, max_base):
        for b in range(2, max_base):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
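# Quick sanity check (added for illustration): for n = 5 the distinct powers
# a**b with 2 <= a, b <= 5 form 15 values (16 = 2**4 = 4**2 is the only
# collision), so solution(5) must return 15.
assert solution(5) == 15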
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip()))) | 361 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    """Open an image file and return it as an RGB PIL image."""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    """Stack transformed image tensors and labels into a single batch dict."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f" distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
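    # Illustrative shape flow (actual values depend on the checkpoint's image
    # processor): PIL image -> RandomResizedCrop / Resize+CenterCrop to `size`
    # -> ToTensor gives a (3, size, size) float tensor in [0, 1] -> Normalize
    # shifts it by the processor's mean/std.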
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
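# Example invocation (illustrative; dataset name and output path are placeholders):
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./vit-base-beans \
#       --do_train --do_eval \
#       --remove_unused_columns False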
if __name__ == "__main__":
    main()
| 215 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LUKE model."""

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
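# Minimal usage sketch (illustrative): instantiating the default configuration
# and reading back one of the attributes set above.
#
#     config = LukeConfig()
#     assert config.entity_emb_size == 256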
| 31 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
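# Minimal usage sketch (illustrative; requires network access to the Hub):
#
#     tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     tok("hello world")["input_ids"]  # -> ids wrapped in [CLS] ... [SEP]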
| 193 | 0 |
'''simple docstring'''
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)
class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    r"""
    Constructs a MarkupLM feature extractor: extracts nodes and their xpaths from raw HTML strings.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
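# Minimal usage sketch (illustrative):
#
#     fe = MarkupLMFeatureExtractor()
#     out = fe("<html><body><p>Hello</p></body></html>")
#     out["nodes"]   # [["Hello"]]
#     out["xpaths"]  # [["/html/body/p"]]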
| 106 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
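# Minimal usage sketch (illustrative): in `datasets`, a formatter like this is
# what backs `dataset.with_format("jax")`, so rows and columns come back as
# jax.Array values.
#
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]  # -> jax.Array of dtype int32 (or int64 if x64 is enabled)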
| 106 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 168 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
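# Minimal usage sketch (illustrative): composing the two sub-configs defined above.
#
#     text_cfg = AlignTextConfig()
#     vision_cfg = AlignVisionConfig()
#     cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     cfg.projection_dim  # -> 640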
| 363 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    """Load a TensorFlow ALBERT checkpoint into a PyTorch AlbertForPreTraining model and save it."""
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 278 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Prints the first- and second-order Shannon entropies of the text and their difference."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the input text into two dicts of counts: the frequency of single
    characters and the frequency of two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
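# Quick illustration (not executed on import): for the two-character string "ab",
# analyze_text returns Counter({"b": 1, "a": 1}) and Counter({" a": 1, "ab": 1});
# calculate_prob would then report the entropies of those two distributions.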
def main() -> None:
    """Run the module's doctests."""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 134 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively explores the matrix and records the largest all-ones square found."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized with a dp_array of sub-problem solutions."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only the current and next rows instead of the full table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row[:]  # copy, so later writes to current_row don't alias next_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 134 | 1 |
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    """Quantize a model's linear layers with bitsandbytes, loading and dispatching weights if needed."""
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
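# Minimal usage sketch (illustrative; the model class and weights path are placeholders):
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     with init_empty_weights():
#         empty_model = MyModel(...)  # hypothetical model built on the meta device
#     model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/weights", device_map="auto"
#     )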
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. " "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Replace all `torch.nn.Linear` modules with bitsandbytes quantized equivalents."""
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def a ( SCREAMING_SNAKE_CASE_ : str ):
"""simple docstring"""
with init_empty_weights():
        UpperCamelCase : Any = deepcopy(SCREAMING_SNAKE_CASE_ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
UpperCamelCase : Dict = find_tied_parameters(SCREAMING_SNAKE_CASE_ )
# For compatibility with Accelerate < 0.18
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : str = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase : Dict = sum(SCREAMING_SNAKE_CASE_ , [] )
UpperCamelCase : List[str] = len(SCREAMING_SNAKE_CASE_ ) > 0
# Check if it is a base model
UpperCamelCase : Optional[int] = False
if hasattr(SCREAMING_SNAKE_CASE_ , '''base_model_prefix''' ):
UpperCamelCase : Optional[Any] = not hasattr(SCREAMING_SNAKE_CASE_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase : Any = list(model.named_children() )
UpperCamelCase : List[str] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase : List[Any] = set(SCREAMING_SNAKE_CASE_ ) - set(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = list(set(SCREAMING_SNAKE_CASE_ ) ) + list(SCREAMING_SNAKE_CASE_ )
# remove ".weight" from the keys
UpperCamelCase : Optional[int] = ['''.weight''', '''.bias''']
UpperCamelCase : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase : Optional[Any] = name.replace(SCREAMING_SNAKE_CASE_ , '''''' )
filtered_module_names.append(SCREAMING_SNAKE_CASE_ )
return filtered_module_names
def a ( SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
for m in model.modules():
        if isinstance(SCREAMING_SNAKE_CASE_ , bnb.nn.Linear4bit ):
return True
return False
def a ( SCREAMING_SNAKE_CASE_ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
"""simple docstring"""
    if fp16_statistics is None:
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 , dtype=SCREAMING_SNAKE_CASE_ , value=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = param_name
UpperCamelCase : Union[str, Any] = model
if "." in tensor_name:
UpperCamelCase : Any = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCamelCase : str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
UpperCamelCase : Any = new_module
UpperCamelCase : int = splits[-1]
# offload weights
UpperCamelCase : List[Any] = False
offload_weight(module._parameters[tensor_name] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ , )
else:
offload_weight(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
offload_weight(SCREAMING_SNAKE_CASE_ , param_name.replace('''weight''' , '''SCB''' ) , SCREAMING_SNAKE_CASE_ , index=SCREAMING_SNAKE_CASE_ )
set_module_tensor_to_device(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , '''meta''' , dtype=SCREAMING_SNAKE_CASE_ , value=torch.empty(*param.size() ) )
| 315 |
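The offloading helper above resolves a dotted parameter name such as `transformer.h.0.attn.weight` by splitting on `.` and walking `getattr` one segment at a time until only the leaf tensor name remains. A minimal sketch of that traversal on plain `torch.nn` modules (the helper name here is illustrative):

import torch
from torch import nn

def resolve_dotted(module: nn.Module, dotted_name: str):
    """Return (owning_module, leaf_name) for a dotted parameter path."""
    *parents, leaf = dotted_name.split(".")
    for part in parents:
        module = getattr(module, part, None)
        if module is None:
            raise ValueError(f"no submodule named {part!r}")
    return module, leaf

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
owner, leaf = resolve_dotted(model, "0.weight")
assert owner is model[0] and isinstance(getattr(owner, leaf), torch.Tensor)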
__UpperCAmelCase : str = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase : Union[str, Any] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 315 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['PerceiverFeatureExtractor']
__a = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 145 |
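The `_LazyModule` indirection above defers the heavy framework imports until an exported name is first accessed. The same effect can be sketched with a module-level `__getattr__` (PEP 562) placed in a package's `__init__.py`; the submodule map below is hypothetical, not the real Transformers machinery:

import importlib

_import_structure = {"math_utils": ["sqrt_table"]}  # hypothetical submodule -> exported names

def __getattr__(name):
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)  # import happens only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")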
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = get_activation('''swish''' )
self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''silu''' )
self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''mish''' )
self.assertIsInstance(_a , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''gelu''' )
self.assertIsInstance(_a , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 45 | 0 |
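The tests above exercise a `get_activation` lookup that maps string names to `torch.nn` modules. A minimal registry with the same name-to-module mapping the assertions imply (a sketch, not the actual diffusers implementation):

import torch
from torch import nn

_ACTIVATIONS = {"swish": nn.SiLU, "silu": nn.SiLU, "mish": nn.Mish, "gelu": nn.GELU}

def get_activation(name: str) -> nn.Module:
    try:
        return _ACTIVATIONS[name]()
    except KeyError:
        raise ValueError(f"unknown activation: {name!r}") from None

assert get_activation("gelu")(torch.zeros(1)).item() == 0.0  # GELU(0) == 0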
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
UpperCAmelCase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRContextEncoderTokenizer
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = DPRQuestionEncoderTokenizer
UpperCAmelCase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase__ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(A )
class __lowerCAmelCase :
def __call__( self : Optional[Any] , A : Optional[Any] , A : Optional[str] = None , A : Optional[str] = None , A : Union[bool, str] = False , A : Union[bool, str] = False , A : Optional[int] = None , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = None , **A : List[Any] , ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
elif titles is None or texts is None:
_UpperCAmelCase = titles if texts is None else texts
return super().__call__(
A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
_UpperCAmelCase = titles if not isinstance(A , A) else [titles]
_UpperCAmelCase = texts if not isinstance(A , A) else [texts]
_UpperCAmelCase = len(A)
_UpperCAmelCase = questions if not isinstance(A , A) else [questions] * n_passages
assert len(A) == len(
A), F"There should be as many titles than texts but got {len(A)} titles and {len(A)} texts."
_UpperCAmelCase = super().__call__(A , A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = super().__call__(A , add_special_tokens=A , padding=A , truncation=A)['input_ids']
_UpperCAmelCase = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A , A)
]
}
if return_attention_mask is not False:
_UpperCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_UpperCAmelCase = attention_mask
return self.pad(A , padding=A , max_length=A , return_tensors=A)
def _lowerCamelCase ( self : str , A : BatchEncoding , A : DPRReaderOutput , A : int = 16 , A : int = 64 , A : int = 4 , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = reader_input['input_ids']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reader_output[:3]
_UpperCAmelCase = len(A)
_UpperCAmelCase = sorted(range(A) , reverse=A , key=relevance_logits.__getitem__)
_UpperCAmelCase = []
for doc_id in sorted_docs:
_UpperCAmelCase = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase = sequence_ids.index(self.pad_token_id)
else:
_UpperCAmelCase = len(A)
_UpperCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(A) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _lowerCamelCase ( self : Tuple , A : List[int] , A : List[int] , A : int , A : int , ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_UpperCAmelCase = []
for start_index, start_score in enumerate(A):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_UpperCAmelCase = sorted(A , key=lambda A: x[1] , reverse=A)
_UpperCAmelCase = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F"Wrong span indices: [{start_index}:{end_index}]"
_UpperCAmelCase = end_index - start_index + 1
assert length <= max_answer_length, F"Span is too long: {length} > {max_answer_length}"
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(A) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(A )
class __lowerCAmelCase ( A , A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = DPRReaderTokenizer
| 369 |
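`decode_best_spans` above scores every (start, end) pair within `max_answer_length`, sorts by the summed start/end logits, and greedily keeps spans that neither contain nor are contained by an already-chosen span. A standalone sketch of that selection step:

def best_spans(start_logits, end_logits, max_answer_length=10, top_spans=3):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # skip spans contained in, or containing, a higher-scoring span
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

assert best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5], max_answer_length=2, top_spans=2) == [(1, 2), (0, 0)]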
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class __lowerCAmelCase :
def __init__( self : str , A : str , A : Dict=13 , A : int=7 , A : Tuple=True , A : Union[str, Any]=True , A : Any=True , A : Dict=True , A : Dict=99 , A : Tuple=32 , A : Any=2 , A : Any=4 , A : Any=37 , A : Optional[Any]="gelu" , A : List[Any]=0.1 , A : Tuple=0.1 , A : Optional[Any]=5_12 , A : Tuple=16 , A : int=2 , A : List[str]=0.0_2 , A : int=False , A : List[Any]=True , A : Optional[Any]="None" , A : Union[str, Any]=3 , A : List[str]=4 , A : List[Any]=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple , A : int , A : Any , A : List[str] , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel(config=A)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str , A : Tuple , A : Tuple , A : Optional[int] , A : List[str] , A : Any , A : List[str] , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForMaskedLM(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : List[Any] , A : Tuple , A : Tuple , A : Optional[int] , A : Optional[int] , A : List[Any] , A : Any , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForSequenceClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : List[Any] , A : List[str] , A : Optional[Any] , A : int , A : Any , A : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForTokenClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Dict , A : Dict , A : Any , A : Tuple , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForQuestionAnswering(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A)
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
self.assertIsNotNone(A)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet')
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
_UpperCAmelCase = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , A , atol=1E-4)
| 290 | 0 |
_lowerCamelCase : Any = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Tuple = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
} | 282 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = "openai-gpt"
__UpperCamelCase = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , lowercase_ : List[str]=40478 , lowercase_ : List[str]=512 , lowercase_ : Optional[Any]=768 , lowercase_ : Tuple=12 , lowercase_ : Tuple=12 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=1e-5 , lowercase_ : int=0.02 , lowercase_ : Optional[int]="cls_index" , lowercase_ : Any=True , lowercase_ : List[Any]=None , lowercase_ : List[str]=True , lowercase_ : Optional[Any]=0.1 , **lowercase_ : List[str] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = n_positions
SCREAMING_SNAKE_CASE_ : Optional[int] = n_embd
SCREAMING_SNAKE_CASE_ : Dict = n_layer
SCREAMING_SNAKE_CASE_ : Any = n_head
SCREAMING_SNAKE_CASE_ : Union[str, Any] = afn
SCREAMING_SNAKE_CASE_ : int = resid_pdrop
SCREAMING_SNAKE_CASE_ : List[str] = embd_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = summary_type
SCREAMING_SNAKE_CASE_ : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE_ : Union[str, Any] = summary_activation
SCREAMING_SNAKE_CASE_ : Any = summary_first_dropout
SCREAMING_SNAKE_CASE_ : List[str] = summary_proj_to_labels
super().__init__(**lowercase_)
| 91 | 0 |
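The `attribute_map` in the config above lets canonical names such as `hidden_size` alias model-specific fields such as `n_embd`. A toy version of the read side of that aliasing, independent of the real `PretrainedConfig` (which also maps writes):

class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):  # only called when normal lookup fails
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

cfg = AliasedConfig(n_embd=512)
assert cfg.hidden_size == 512 and cfg.num_hidden_layers == 12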
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase_ : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class _a ( __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = """ernie_m"""
SCREAMING_SNAKE_CASE_ : Optional[int] = {"""dropout""": """classifier_dropout""", """num_classes""": """num_labels"""}
def __init__( self ,_SCREAMING_SNAKE_CASE = 250_002 ,_SCREAMING_SNAKE_CASE = 768 ,_SCREAMING_SNAKE_CASE = 12 ,_SCREAMING_SNAKE_CASE = 12 ,_SCREAMING_SNAKE_CASE = 3_072 ,_SCREAMING_SNAKE_CASE = "gelu" ,_SCREAMING_SNAKE_CASE = 0.1 ,_SCREAMING_SNAKE_CASE = 0.1 ,_SCREAMING_SNAKE_CASE = 514 ,_SCREAMING_SNAKE_CASE = 0.0_2 ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = 1e-05 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=0.0 ,**_SCREAMING_SNAKE_CASE ,) -> List[str]:
super().__init__(pad_token_id=__a ,**__a )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = classifier_dropout
_snake_case = is_decoder
_snake_case = act_dropout
| 362 |
'''simple docstring'''
def __a ( _UpperCamelCase: int ) -> None:
"""simple docstring"""
_snake_case = generate_pascal_triangle(_UpperCamelCase )
for row_idx in range(_UpperCamelCase ):
# Print left spaces
for _ in range(num_rows - row_idx - 1 ):
print(end=" " )
# Print row values
for col_idx in range(row_idx + 1 ):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=" " )
else:
print(triangle[row_idx][col_idx] , end="" )
print()
def __a ( _UpperCamelCase: int ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_snake_case = []
for current_row_idx in range(_UpperCamelCase ):
_snake_case = populate_current_row(_UpperCamelCase , _UpperCamelCase )
triangle.append(_UpperCamelCase )
return triangle
def __a ( _UpperCamelCase: list[list[int]] , _UpperCamelCase: int ) -> list[int]:
"""simple docstring"""
_snake_case = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
_snake_case , _snake_case = 1, 1
for current_col_idx in range(1 , _UpperCamelCase ):
calculate_current_element(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return current_row
def __a ( _UpperCamelCase: list[list[int]] , _UpperCamelCase: list[int] , _UpperCamelCase: int , _UpperCamelCase: int , ) -> None:
"""simple docstring"""
_snake_case = triangle[current_row_idx - 1][current_col_idx - 1]
_snake_case = triangle[current_row_idx - 1][current_col_idx]
_snake_case = above_to_left_elt + above_to_right_elt
def __a ( _UpperCamelCase: int ) -> list[list[int]]:
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
_snake_case = [[1]]
for row_index in range(1 , _UpperCamelCase ):
_snake_case = [0] + result[-1] + [0]
_snake_case = row_index + 1
# Calculate the number of distinct elements in a row
_snake_case = sum(divmod(_UpperCamelCase , 2 ) )
_snake_case = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
_snake_case = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
_snake_case = row_first_half + row_second_half
result.append(_UpperCamelCase )
return result
def __a ( ) -> None:
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase: Callable , _UpperCamelCase: int ) -> None:
_snake_case = F"""{func.__name__}({value})"""
_snake_case = timeit(F"""__main__.{call}""" , setup="import __main__" )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F"""{call:38} -- {timing:.4f} seconds""" )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 142 | 0 |
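Assuming the generators above keep the names their internal calls reference (`generate_pascal_triangle` and `generate_pascal_triangle_optimized`), a quick consistency check of the first four rows:

rows = generate_pascal_triangle(4)
assert rows == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
assert rows == generate_pascal_triangle_optimized(4)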
from __future__ import annotations
def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None )-> None:
if start is None:
lowerCAmelCase_ : int = 0
if end is None:
lowerCAmelCase_ : List[Any] = len(lowerCAmelCase_ ) - 1
if start >= end:
return
lowerCAmelCase_ : str = (start + end) // 2
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ )
if sequence[end] < sequence[mid]:
lowerCAmelCase_ , lowerCAmelCase_ : int = sequence[mid], sequence[end]
slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod() | 262 |
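Slowsort sorts in place; assuming the function is named `slowsort` as its recursive calls indicate, a quick check on a deliberately tiny input (the algorithm's runtime is far worse than quadratic, so large inputs are impractical):

data = [5, 2, 4, 1, 3]
slowsort(data)
assert data == [1, 2, 3, 4, 5]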
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_UpperCAmelCase : Any =logging.get_logger(__name__)
class snake_case__( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *__lowercase , **__lowercase ) -> None:
warnings.warn(
'''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ChineseCLIPImageProcessor instead.''' , __lowercase , )
super().__init__(*__lowercase , **__lowercase ) | 262 | 1 |
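The class above is a thin deprecation shim: it subclasses the replacement processor and emits a warning on construction before delegating. The general pattern, with hypothetical names:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept for backward compatibility."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)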
def snake_case_ ( snake_case ) -> list[int]:
lowercase__: Dict = [0 for i in range(len(snake_case ) )]
# initialize interval's left pointer and right pointer
lowercase__ , lowercase__: Union[str, Any] = 0, 0
for i in range(1 , len(snake_case ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowercase__: List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowercase__: List[str] = min_edge
while go_next(snake_case , snake_case , snake_case ):
z_result[i] += 1
        # if the new match extends past the current right boundary,
        # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowercase__ , lowercase__: List[Any] = i, i + z_result[i] - 1
return z_result
def snake_case_ ( snake_case , snake_case , snake_case ) -> bool:
return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]]
def snake_case_ ( snake_case , snake_case ) -> int:
lowercase__: Tuple = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowercase__: Any = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(snake_case ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 288 |
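With z[0] fixed at 0 by convention, the z-array stores at each position the length of the longest substring starting there that matches a prefix of the whole string. Assuming the functions above keep the names their internal calls reference (`z_function` and `find_pattern`), two small checks; note that because the pattern and text are concatenated without a separator character, matches that straddle the boundary can in principle be overcounted:

assert z_function("aabaab") == [0, 1, 0, 3, 1, 0]
assert find_pattern("abr", "abracadabra") == 2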
from collections import deque
from math import floor
from random import random
from time import time
class __a :
def __init__( self ) -> Dict:
'''simple docstring'''
lowercase__: Dict = {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
lowercase__: int = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
lowercase__: Union[str, Any] = []
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
if s == d:
return []
lowercase__: Tuple = []
lowercase__: Tuple = []
if s == -2:
lowercase__: Any = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[int] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if c == -1:
lowercase__: int = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Dict:
'''simple docstring'''
lowercase__: int = deque()
lowercase__: Dict = []
if s == -2:
lowercase__: Optional[int] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: str = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
lowercase__: Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: str = []
if s == -2:
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: List[Any] = s
lowercase__: Any = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Dict = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
lowercase__: int = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[Any] = []
lowercase__: int = []
lowercase__: List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Union[str, Any] = []
lowercase__: List[str] = s
lowercase__: Dict = False
lowercase__: Union[str, Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: List[Any] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Any = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Union[str, Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: int = s
lowercase__: str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Any = []
lowercase__: int = []
lowercase__: Dict = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Optional[int] = -2
lowercase__: List[Any] = []
lowercase__: List[str] = s
lowercase__: List[Any] = False
lowercase__: str = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Dict = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Any = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Dict = s
lowercase__: Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Dict:
'''simple docstring'''
lowercase__: Union[str, Any] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: Optional[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[str]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
class __a :
def __init__( self ) -> Tuple:
'''simple docstring'''
lowercase__: Dict = {}
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=1 ) -> List[Any]:
'''simple docstring'''
# check if the u exists
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
lowercase__: str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
lowercase__: Union[str, Any] = [[w, u]]
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
lowercase__: str = []
lowercase__: int = []
if s == -2:
lowercase__: Tuple = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: int = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
lowercase__: Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
lowercase__: Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
lowercase__: Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = deque()
lowercase__: List[Any] = []
if s == -2:
lowercase__: str = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
lowercase__: Union[str, Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return len(self.graph[u] )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: str = []
lowercase__: Dict = []
lowercase__: Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Dict = -2
lowercase__: Dict = []
lowercase__: List[Any] = s
lowercase__: Union[str, Any] = False
lowercase__: List[str] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: str = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: Dict = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: int = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Tuple = s
lowercase__: List[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: Tuple = []
lowercase__: Optional[int] = []
lowercase__: Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
lowercase__: Tuple = -2
lowercase__: Any = []
lowercase__: int = s
lowercase__: Optional[int] = False
lowercase__: List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
lowercase__: Optional[int] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
lowercase__: Union[str, Any] = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
lowercase__: Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
lowercase__: List[str] = True
if len(lowerCAmelCase__ ) != 0:
lowercase__: List[str] = stack[len(lowerCAmelCase__ ) - 1]
else:
lowercase__: Dict = False
indirect_parents.append(lowerCAmelCase__ )
lowercase__: Optional[Any] = s
lowercase__: Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return list(self.graph )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 , lowerCAmelCase__=-1 ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Dict = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[Any] = time()
return end - begin
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__=-2 ) -> List[Any]:
'''simple docstring'''
lowercase__: str = time()
self.bfs(lowerCAmelCase__ )
lowercase__: List[str] = time()
return end - begin
| 288 | 1 |
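The traversals above implement iterative DFS and BFS over an adjacency structure, with explicit stacks and back-tracking state. For comparison, a compact standalone version of both on a plain dict-of-lists graph:

from collections import deque

def dfs(graph, start):
    seen, stack, order = {start}, [start], []
    while stack:
        node = stack.pop()
        order.append(node)
        for nxt in reversed(graph.get(node, [])):
            if nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return order

def bfs(graph, start):
    seen, queue, order = {start}, deque([start]), []
    while queue:
        node = queue.popleft()
        order.append(node)
        for nxt in graph.get(node, []):
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return order

g = {1: [2, 3], 2: [4], 3: [4], 4: []}
assert dfs(g, 1) == [1, 2, 4, 3]
assert bfs(g, 1) == [1, 2, 3, 4]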
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : str = logging.getLogger()
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
"""simple docstring"""
A__ = argparse.ArgumentParser()
parser.add_argument('''-f''' )
A__ = parser.parse_args()
return args.f
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->None:
'''simple docstring'''
A__ = logging.StreamHandler(sys.stdout)
logger.addHandler(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str) ->Tuple:
'''simple docstring'''
A__ = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , '''run_glue_deebert.py''')
with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__):
A__ = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(UpperCAmelCase__ , 0.666)
@slow
@require_torch_non_multi_gpu
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
'''simple docstring'''
A__ = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
self.run_and_check(UpperCAmelCase__)
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(UpperCAmelCase__)
A__ = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
self.run_and_check(UpperCAmelCase__)
| 14 |
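The harness above drives a script's `main()` by rewriting `sys.argv` under `unittest.mock.patch` so that `argparse` sees the synthetic command line. The core trick in isolation:

import argparse
import sys
from unittest.mock import patch

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_name")
    return parser.parse_args().task_name

with patch.object(sys, "argv", ["prog", "--task_name", "MRPC"]):
    assert main() == "MRPC"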
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : int = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 1 |
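The `_LazyModule` registered above defers the heavy framework imports until an attribute is first touched. A minimal sketch of that idea (not the actual transformers implementation, which also handles `__dir__`, `module_spec` and pickling):

import importlib
import types
class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value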
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case_ (self ) -> Tuple:
UpperCamelCase = 1
UpperCamelCase = 3
UpperCamelCase = (32, 32)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
return image
@property
def snake_case_ (self ) -> Any:
torch.manual_seed(0 )
        UpperCamelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def snake_case_ (self ) -> Tuple:
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case_ (self ) -> Dict:
torch.manual_seed(0 )
UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__a )
@property
def snake_case_ (self ) -> List[Any]:
def extract(*__a , **__a ):
class _lowerCamelCase :
def __init__(self ) -> List[Any]:
UpperCamelCase = torch.ones([0] )
def snake_case_ (self , __a ) -> Optional[Any]:
self.pixel_values.to(__a )
return self
return Out()
return extract
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=__a )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
UpperCamelCase = output.images
UpperCamelCase = torch.Generator(device=__a ).manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Any:
UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__a )
assert isinstance(__a , __a )
assert isinstance(pipe.scheduler , __a )
assert pipe.safety_checker is None
UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
UpperCamelCase = StableDiffusionPipeline.from_pretrained(__a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = self.dummy_cond_unet
UpperCamelCase = PNDMScheduler(skip_prk_steps=__a )
UpperCamelCase = self.dummy_vae
UpperCamelCase = self.dummy_text_encoder
UpperCamelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
UpperCamelCase = unet.half()
UpperCamelCase = vae.half()
UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase = StableDiffusionPipeline(
unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "A painting of a squirrel eating a burger"
UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
def snake_case_ (self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ (self ) -> int:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__a )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
UpperCamelCase = 40_03_66_03_46
UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__a )
UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = "padme amidala taking a bath artwork, safe for work, no nudity"
UpperCamelCase = 27_34_97_17_55
UpperCamelCase = 7
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
UpperCamelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
UpperCamelCase = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
UpperCamelCase = 10_44_35_52_34
UpperCamelCase = 12
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCamelCase = torch.manual_seed(__a )
UpperCamelCase = sd_pipe(
[prompt] , generator=__a , guidance_scale=__a , num_inference_steps=50 , output_type="np" , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 244 |
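These pipeline tests all follow one assertion pattern: run with a fixed seed, take the bottom-right 3x3 patch of the last channel, and compare it to reference values within a loose tolerance. In isolation (the arrays here are placeholders, not real pipeline outputs):

import numpy as np
image = np.zeros((1, 64, 64, 3))        # stand-in for pipeline output
image_slice = image[0, -3:, -3:, -1]    # 3x3 corner of the last channel
expected_slice = np.zeros(9)            # reference values from a known-good run
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2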
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a non-negative decimal integer to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
    """Print a few example conversions."""
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
| 244 | 1 |
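As a cross-check of the function above, a divmod-based variant (hypothetical helper name) compared against Python's built-in oct():

def to_octal(num: int) -> str:
    # build the octal digit string from least to most significant digit
    digits = ""
    while num > 0:
        num, remainder = divmod(num, 8)
        digits = str(remainder) + digits
    return "0o" + (digits or "0")
for n in (2, 8, 65, 216, 512):
    assert to_octal(n) == oct(n)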
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Add the new strings (curr_string + "0", curr_string + "1") to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress the bit string with Lempel-Ziv-Welch and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """Prepend the source file length (Elias gamma coded) to the compressed bits."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string as padded bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, compress it and write the result to destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 6 |
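The padding rule in write_file_binary above, in isolation: append a "1" end marker, then zeros up to a byte boundary. This simplified sketch skips the function's extra 10000000 byte for inputs that already fill whole bytes:

def pack_bits(bits: str) -> bytes:
    padded = bits + "1" + "0" * ((8 - (len(bits) + 1) % 8) % 8)
    return bytes(int(padded[i : i + 8], 2) for i in range(0, len(padded), 8))
assert pack_bits("10100") == bytes([0b10100100])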
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : Union[str, Any]):
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() ,encoding='utf-8' ,check=SCREAMING_SNAKE_CASE__ ,)
assert hasattr(self ,'env')
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : int):
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase : Any = {
'enabled': True,
'processes_per_host': 8,
}
__lowerCamelCase : List[Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
__lowerCamelCase : str = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
__lowerCamelCase : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" ,instance_count=SCREAMING_SNAKE_CASE__ ,instance_type=self.instance_type ,debugger_hook_config=SCREAMING_SNAKE_CASE__ ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 5_0_0,
} ,metric_definitions=self.env.metric_definitions ,distribution=SCREAMING_SNAKE_CASE__ ,py_version='py36' ,)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any):
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)])
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
# create estimator
__lowerCamelCase : str = self.create_estimator(SCREAMING_SNAKE_CASE__)
# run training
estimator.fit()
# result dataframe
__lowerCamelCase : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
__lowerCamelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
__lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowerCamelCase : str = (
Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' ,9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
assert all(t <= self.results['eval_loss'] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" ,'w') as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,SCREAMING_SNAKE_CASE__)
| 73 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_lowerCAmelCase : int = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
def _lowercase ( self: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple = None ,__lowerCAmelCase: List[Any] = None ,__lowerCAmelCase: List[Any] = None ,__lowerCAmelCase: str = True ,):
'''simple docstring'''
_lowerCamelCase : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ ,lowercase_ ) )]
if identifier is not None:
_lowerCamelCase : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ ,lowercase_ ):
for n_ in n_identifier:
_lowerCamelCase : str = [file for file in files if n_ not in file]
else:
_lowerCamelCase : Any = [file for file in files if n_identifier not in file]
_lowerCamelCase : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
_lowerCamelCase : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" ,lowercase_ )
if only_modules:
_lowerCamelCase : str = file.split("." )[0]
try:
_lowerCamelCase : str = getattr(lowercase_ ,lowercase_ )
_lowerCamelCase : Tuple = doctest.DocTestSuite(lowercase_ )
_lowerCamelCase : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_lowerCamelCase : Optional[Any] = doctest.testfile(str(".." / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : int = Path("src/transformers" )
_lowerCamelCase : str = "modeling"
_lowerCamelCase : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ ,identifier=lowercase_ ,ignore_files=lowercase_ )
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = Path("src/transformers" )
_lowerCamelCase : Any = "tokenization"
self.analyze_directory(lowercase_ ,identifier=lowercase_ )
def _lowercase ( self: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = Path("src/transformers" )
_lowerCamelCase : List[Any] = "configuration"
self.analyze_directory(lowercase_ ,identifier=lowercase_ )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = Path("src/transformers" )
_lowerCamelCase : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ ,n_identifier=lowercase_ )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = Path("docs/source" )
_lowerCamelCase : Union[str, Any] = ["favicon.ico"]
        self.analyze_directory(lowercase_ ,ignore_files=lowercase_ ,only_modules=lowercase_ )
| 351 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_lowerCAmelCase : Optional[int] = '''__DUMMY_TRANSFORMERS_USER__'''
_lowerCAmelCase : Dict = '''Dummy User'''
_lowerCAmelCase : Optional[int] = '''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'''
_lowerCAmelCase : Tuple = '''https://hub-ci.huggingface.co'''
_lowerCAmelCase : Any = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
_lowerCAmelCase : Tuple = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
_lowerCAmelCase : Dict = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr(
"huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr("datasets.config.HF_ENDPOINT" , _lowerCamelCase )
monkeypatch.setattr("datasets.config.HUB_DATASETS_URL" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token" , _lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(_lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="session" )
def lowerCamelCase_( ) -> str:
'''simple docstring'''
return HfApi(endpoint=_lowerCamelCase )
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Dict = HfFolder.get_token()
HfFolder.save_token(_lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_lowerCamelCase )
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
def _cleanup_repo(_lowerCamelCase ):
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
return _cleanup_repo
@pytest.fixture
def lowerCamelCase_( _lowerCamelCase ) -> List[str]:
'''simple docstring'''
@contextmanager
def _temporary_repo(_lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(_lowerCamelCase )
return _temporary_repo
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
_lowerCamelCase : Tuple = F"""repo_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[str] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data/text_data.txt" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> str:
'''simple docstring'''
_lowerCamelCase : List[Any] = F"""repo_zipped_txt_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : Dict = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session" )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = F"""repo_zipped_img_data-{int(time.time() * 10e3 )}"""
_lowerCamelCase : List[Any] = F"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" , private=_lowerCamelCase )
hf_api.upload_file(
token=_lowerCamelCase , path_or_fileobj=str(_lowerCamelCase ) , path_in_repo="data.zip" , repo_id=_lowerCamelCase , repo_type="dataset" , )
yield repo_id
try:
hf_api.delete_repo(_lowerCamelCase , token=_lowerCamelCase , repo_type="dataset" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
    return hf_private_dataset_repo_zipped_img_data_
| 340 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
def __init__( self : List[Any] ):
_UpperCAmelCase : Union[str, Any] = []
def _A ( self : Any , A : Union[str, Any] , A : Optional[int] , A : List[str] , **A : Tuple ):
self.events.append("on_init_end" )
def _A ( self : Any , A : str , A : List[Any] , A : List[Any] , **A : Tuple ):
self.events.append("on_train_begin" )
def _A ( self : Tuple , A : List[str] , A : Tuple , A : int , **A : List[str] ):
self.events.append("on_train_end" )
def _A ( self : Optional[Any] , A : Dict , A : Any , A : Optional[Any] , **A : List[Any] ):
self.events.append("on_epoch_begin" )
def _A ( self : Optional[Any] , A : List[Any] , A : List[str] , A : Optional[int] , **A : Optional[int] ):
self.events.append("on_epoch_end" )
def _A ( self : List[str] , A : Optional[int] , A : List[Any] , A : Union[str, Any] , **A : Any ):
self.events.append("on_step_begin" )
def _A ( self : Tuple , A : Union[str, Any] , A : int , A : Optional[int] , **A : int ):
self.events.append("on_step_end" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Union[str, Any] , A : str , **A : Union[str, Any] ):
self.events.append("on_evaluate" )
def _A ( self : Optional[Any] , A : Optional[int] , A : Dict , A : List[Any] , **A : Dict ):
self.events.append("on_predict" )
def _A ( self : Dict , A : Dict , A : List[Any] , A : Dict , **A : str ):
self.events.append("on_save" )
def _A ( self : Tuple , A : Optional[Any] , A : Union[str, Any] , A : Optional[int] , **A : Dict ):
self.events.append("on_log" )
def _A ( self : Optional[int] , A : Optional[Any] , A : Tuple , A : Tuple , **A : List[str] ):
self.events.append("on_prediction_step" )
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
def _A ( self : List[Any] ):
shutil.rmtree(self.output_dir )
def _A ( self : Union[str, Any] , A : Optional[int]=0 , A : Optional[Any]=0 , A : Optional[Any]=64 , A : Dict=64 , A : Any=None , A : Tuple=False , **A : Optional[int] ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
_UpperCAmelCase : str = RegressionDataset(length=A )
_UpperCAmelCase : Union[str, Any] = RegressionDataset(length=A )
_UpperCAmelCase : Any = RegressionModelConfig(a=A , b=A )
_UpperCAmelCase : List[Any] = RegressionPreTrainedModel(A )
_UpperCAmelCase : Dict = TrainingArguments(self.output_dir , disable_tqdm=A , report_to=[] , **A )
return Trainer(
A , A , train_dataset=A , eval_dataset=A , callbacks=A , )
def _A ( self : str , A : List[str] , A : List[str] ):
self.assertEqual(len(A ) , len(A ) )
# Order doesn't matter
_UpperCAmelCase : Tuple = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
_UpperCAmelCase : Any = sorted(A , key=lambda A : cb.__name__ if isinstance(A , A ) else cb.__class__.__name__ )
for cba, cba in zip(A , A ):
if isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(A , A )
elif isinstance(A , A ) and not isinstance(A , A ):
self.assertEqual(A , cba.__class__ )
elif not isinstance(A , A ) and isinstance(A , A ):
self.assertEqual(cba.__class__ , A )
else:
self.assertEqual(A , A )
def _A ( self : int , A : List[str] ):
_UpperCAmelCase : List[str] = ["on_init_end", "on_train_begin"]
_UpperCAmelCase : str = 0
_UpperCAmelCase : Optional[Any] = len(trainer.get_eval_dataloader() )
_UpperCAmelCase : Optional[int] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(A ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _A ( self : str ):
_UpperCAmelCase : Any = self.get_trainer()
_UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# Callbacks passed at init are added to the default callbacks
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A )
_UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_UpperCAmelCase : Dict = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : Optional[Any] = self.get_trainer()
_UpperCAmelCase : Any = trainer.pop_callback(A )
self.assertEqual(cb.__class__ , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# We can also add, pop, or remove by instance
_UpperCAmelCase : Union[str, Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : List[Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
_UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A )
self.assertEqual(A , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=A )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# Independent log/save/eval
_UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# A bit of everything
_UpperCAmelCase : int = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
_UpperCAmelCase : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A ) in warn_mock.call_args[0][0]
| 31 |
'''simple docstring'''
from typing import Any
def UpperCamelCase_ ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : dict , _UpperCAmelCase : dict , _UpperCAmelCase : dict , ) -> list:
"""simple docstring"""
_validation(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# Creates data structures and fill initial step
_UpperCAmelCase : dict = {}
_UpperCAmelCase : dict = {}
for state in states_space:
_UpperCAmelCase : Union[str, Any] = observations_space[0]
_UpperCAmelCase : Tuple = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
_UpperCAmelCase : List[str] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_UpperCAmelCase ) ):
_UpperCAmelCase : Optional[Any] = observations_space[o]
_UpperCAmelCase : int = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
_UpperCAmelCase : str = ""
_UpperCAmelCase : Tuple = -1
for k_state in states_space:
_UpperCAmelCase : Any = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
_UpperCAmelCase : Union[str, Any] = probability
_UpperCAmelCase : str = k_state
# Update probabilities and pointers dicts
_UpperCAmelCase : Optional[int] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
_UpperCAmelCase : Tuple = arg_max
# The final observation
_UpperCAmelCase : Optional[Any] = observations_space[len(_UpperCAmelCase ) - 1]
# argmax for given final observation
_UpperCAmelCase : List[str] = ""
_UpperCAmelCase : Any = -1
for k_state in states_space:
_UpperCAmelCase : Optional[int] = probabilities[(k_state, final_observation)]
if probability > max_probability:
_UpperCAmelCase : int = probability
_UpperCAmelCase : Dict = k_state
_UpperCAmelCase : Dict = arg_max
# Process pointers backwards
_UpperCAmelCase : List[Any] = last_state
_UpperCAmelCase : str = []
for o in range(len(_UpperCAmelCase ) - 1 , -1 , -1 ):
result.append(_UpperCAmelCase )
_UpperCAmelCase : List[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , ) -> None:
"""simple docstring"""
_validate_not_empty(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
_validate_lists(_UpperCAmelCase , _UpperCAmelCase )
_validate_dicts(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> None:
"""simple docstring"""
_validate_list(_UpperCAmelCase , "observations_space" )
_validate_list(_UpperCAmelCase , "states_space" )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
if not isinstance(_object , _UpperCAmelCase ):
_UpperCAmelCase : Optional[int] = F"""{var_name} must be a list"""
raise ValueError(_UpperCAmelCase )
else:
for x in _object:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase : Optional[int] = F"""{var_name} must be a list of strings"""
raise ValueError(_UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Any , ) -> None:
"""simple docstring"""
_validate_dict(_UpperCAmelCase , "initial_probabilities" , _UpperCAmelCase )
_validate_nested_dict(_UpperCAmelCase , "transition_probabilities" )
_validate_nested_dict(_UpperCAmelCase , "emission_probabilities" )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> None:
"""simple docstring"""
_validate_dict(_object , _UpperCAmelCase , _UpperCAmelCase )
for x in _object.values():
_validate_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase_ ( _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : type , _UpperCAmelCase : bool = False ) -> None:
"""simple docstring"""
if not isinstance(_object , _UpperCAmelCase ):
_UpperCAmelCase : Any = F"""{var_name} must be a dict"""
raise ValueError(_UpperCAmelCase )
if not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for x in _object ):
_UpperCAmelCase : Tuple = F"""{var_name} all keys must be strings"""
raise ValueError(_UpperCAmelCase )
if not all(isinstance(_UpperCAmelCase , _UpperCAmelCase ) for x in _object.values() ):
_UpperCAmelCase : List[str] = "nested dictionary " if nested else ""
_UpperCAmelCase : List[str] = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(_UpperCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31 | 1 |
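The snippet above is the Viterbi dynamic program plus input validation. A self-contained toy run of the same recurrence on the classic two-state weather HMM (plain dicts, no validation):

states = ("rainy", "sunny")
observations = ("walk", "shop", "clean")
start_p = {"rainy": 0.6, "sunny": 0.4}
trans_p = {"rainy": {"rainy": 0.7, "sunny": 0.3}, "sunny": {"rainy": 0.4, "sunny": 0.6}}
emit_p = {
    "rainy": {"walk": 0.1, "shop": 0.4, "clean": 0.5},
    "sunny": {"walk": 0.6, "shop": 0.3, "clean": 0.1},
}
# forward pass: best probability of being in each state after each observation
probs = {(s, observations[0]): start_p[s] * emit_p[s][observations[0]] for s in states}
pointers = {}
for prev_obs, obs in zip(observations, observations[1:]):
    for s in states:
        best_prev = max(states, key=lambda k: probs[(k, prev_obs)] * trans_p[k][s])
        probs[(s, obs)] = probs[(best_prev, prev_obs)] * trans_p[best_prev][s] * emit_p[s][obs]
        pointers[(s, obs)] = best_prev
# backtrack from the most probable final state
state = max(states, key=lambda s: probs[(s, observations[-1])])
path = [state]
for obs in reversed(observations[1:]):
    state = pointers[(state, obs)]
    path.append(state)
path.reverse()
print(path)  # ['sunny', 'rainy', 'rainy']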
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ :Dict = TypeVar('''T''')
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class __a ( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = 0
def __len__( self ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase__ ( self ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
_UpperCAmelCase = self.elements
self.elements += 1
self._bubble_up(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_UpperCAmelCase , _UpperCAmelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_UpperCAmelCase , _UpperCAmelCase = self.heap[0]
self._bubble_down(_SCREAMING_SNAKE_CASE )
return elem
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
_UpperCAmelCase = (elem, weight)
if position > 0:
_UpperCAmelCase = get_parent_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
if curr_pos == 0:
return None
_UpperCAmelCase = get_parent_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
_UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_up(_SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
_UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
_UpperCAmelCase = get_child_left_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = get_child_right_position(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.heap[nodea_pos][0]
_UpperCAmelCase = self.heap[nodea_pos][0]
_UpperCAmelCase , _UpperCAmelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_UpperCAmelCase = nodea_pos
_UpperCAmelCase = nodea_pos
class __a ( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = 0
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if node not in self.connections:
_UpperCAmelCase = {}
self.nodes += 1
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
self.add_node(_SCREAMING_SNAKE_CASE )
self.add_node(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = weight
_UpperCAmelCase = weight
def lowerCAmelCase__ ( a__: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_UpperCAmelCase = {node: maxsize for node in graph.connections}
_UpperCAmelCase = {node: None for node in graph.connections}
_UpperCAmelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_UpperCAmelCase = priority_queue.extract_min()
_UpperCAmelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_UpperCAmelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
_UpperCAmelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_UpperCAmelCase = node
return dist, parent
| 185 |
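The decrease-key priority queue above can be swapped for Python's heapq with lazy deletion. A self-contained sketch of Prim's algorithm in that style (toy data; returns the total weight of the spanning tree for a connected graph):

import heapq
from collections import defaultdict
def prim_mst_weight(edges):
    graph = defaultdict(list)
    for u, v, w in edges:
        graph[u].append((w, v))
        graph[v].append((w, u))
    visited = set()
    heap = [(0, next(iter(graph)))]  # (edge weight, node); start anywhere
    total = 0
    while heap:
        w, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry: node already reached more cheaply
        visited.add(node)
        total += w
        for edge_w, nxt in graph[node]:
            if nxt not in visited:
                heapq.heappush(heap, (edge_w, nxt))
    return total
assert prim_mst_weight([("a", "b", 1), ("b", "c", 2), ("a", "c", 3)]) == 3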
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, assuming A is strictly diagonally dominant."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")
    table = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless the coefficient part of `table` is strictly diagonally dominant."""
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 1 |
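The same Jacobi update in vectorised numpy form, on a strictly diagonally dominant 2x2 system (a minimal sketch, independent of the function above):

import numpy as np
A = np.array([[4.0, 1.0], [2.0, 5.0]])
b = np.array([1.0, 2.0])
x = np.zeros(2)
D = np.diag(A)           # diagonal entries of A
R = A - np.diag(D)       # off-diagonal remainder
for _ in range(50):
    x = (b - R @ x) / D  # x_{k+1} = D^{-1} (b - R x_k)
print(np.round(x, 6))    # ~[0.166667, 0.333333], matches np.linalg.solve(A, b)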
class PrefixSum:
    """Prefix-sum array supporting O(1) range-sum queries."""
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], both indices inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 300 |
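A quick usage sketch of the class above (method names as reconstructed):

ps = PrefixSum([1, 2, 3, 4, 5])
assert ps.get_sum(1, 3) == 9   # 2 + 3 + 4
assert ps.contains_sum(7)      # the subarray [3, 4] sums to 7
assert not ps.contains_sum(100)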
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = (IPNDMScheduler,)
UpperCAmelCase = (("""num_inference_steps""", 50),)
def _snake_case ( self ,**a_ ) -> Optional[int]:
_UpperCAmelCase : str = {"""num_train_timesteps""": 1_000}
config.update(**a_ )
return config
def _snake_case ( self ,a_=0 ,**a_ ) -> List[str]:
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : Any = kwargs.pop("""num_inference_steps""" ,a_ )
_UpperCAmelCase : Union[str, Any] = self.dummy_sample
_UpperCAmelCase : Union[str, Any] = 0.1 * sample
_UpperCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.get_scheduler_config(**a_ )
_UpperCAmelCase : List[str] = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_UpperCAmelCase : str = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : List[str] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_UpperCAmelCase : List[str] = scheduler_class.from_pretrained(a_ )
new_scheduler.set_timesteps(a_ )
# copy over dummy past residuals
_UpperCAmelCase : Tuple = dummy_past_residuals[:]
_UpperCAmelCase : Dict = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_UpperCAmelCase : str = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ,a_=0 ,**a_ ) -> Dict:
_UpperCAmelCase : str = dict(self.forward_default_kwargs )
_UpperCAmelCase : Union[str, Any] = kwargs.pop("""num_inference_steps""" ,a_ )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
_UpperCAmelCase : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**a_ )
scheduler.set_timesteps(a_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
if time_step is None:
_UpperCAmelCase : Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(a_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(a_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : Dict = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : str = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
_UpperCAmelCase : str = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : str = new_scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _snake_case ( self ,**a_ ) -> List[Any]:
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : List[str] = self.get_scheduler_config(**a_ )
_UpperCAmelCase : List[Any] = scheduler_class(**a_ )
_UpperCAmelCase : List[Any] = 10
_UpperCAmelCase : Dict = self.dummy_model()
_UpperCAmelCase : Dict = self.dummy_sample_deter
scheduler.set_timesteps(a_ )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : Optional[Any] = model(a_ ,a_ )
_UpperCAmelCase : Tuple = scheduler.step(a_ ,a_ ,a_ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase : List[str] = model(a_ ,a_ )
_UpperCAmelCase : int = scheduler.step(a_ ,a_ ,a_ ).prev_sample
return sample
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("""num_inference_steps""" ,a_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : List[str] = scheduler_class(**a_ )
_UpperCAmelCase : Optional[int] = self.dummy_sample
_UpperCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(a_ ,"""set_timesteps""" ):
scheduler.set_timesteps(a_ )
elif num_inference_steps is not None and not hasattr(a_ ,"""set_timesteps""" ):
_UpperCAmelCase : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Optional[int] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.timesteps[5]
_UpperCAmelCase : Optional[Any] = scheduler.timesteps[6]
_UpperCAmelCase : Optional[Any] = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Optional[int] = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
_UpperCAmelCase : Dict = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
_UpperCAmelCase : Any = scheduler.step(a_ ,a_ ,a_ ,**a_ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _snake_case ( self ) -> List[Any]:
for timesteps in [100, 1_000]:
self.check_over_configs(num_train_timesteps=a_ ,time_step=a_ )
def _snake_case ( self ) -> int:
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 100] ):
self.check_over_forward(num_inference_steps=a_ ,time_step=a_ )
def _snake_case ( self ) -> Optional[Any]:
_UpperCAmelCase : Union[str, Any] = self.full_loop()
_UpperCAmelCase : Any = torch.mean(torch.abs(a_ ) )
assert abs(result_mean.item() - 2_540_529 ) < 10
| 215 | 0 |
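# A minimal standalone sketch of the save/load round-trip exercised above, assuming
# current diffusers APIs. DDIMScheduler is used because its step() is deterministic,
# so the original and reloaded instances can be compared exactly.
import tempfile

import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)                     # writes scheduler_config.json
    reloaded = DDIMScheduler.from_pretrained(tmpdir)
reloaded.set_timesteps(50)

sample = torch.randn(1, 3, 8, 8)
residual = 0.1 * sample
t = scheduler.timesteps[0]
out_a = scheduler.step(residual, t, sample).prev_sample
out_b = reloaded.step(residual, t, sample).prev_sample
assert torch.sum(torch.abs(out_a - out_b)) < 1e-5, "Scheduler outputs are not identical"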
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class MaskGenerationPipeline( ChunkPipeline ):
'''simple docstring'''
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        requires_backends(self , '''vision''' )
        requires_backends(self , '''torch''' )
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        image = load_image(image )
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='''pt''' )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                    model_inputs['''image_embeddings'''] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''' )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        input_boxes = model_inputs.pop('''input_boxes''' )
        is_last = model_inputs.pop('''is_last''' )
        original_sizes = model_inputs.pop('''original_sizes''' ).tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['''iou_scores''']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores''' ) )
            all_masks.extend(model_output.pop('''masks''' ) )
            all_boxes.append(model_output.pop('''boxes''' ) )
        all_scores = torch.cat(all_scores )
        all_masks = torch.cat(all_masks )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional['''rle_mask'''] = rle_mask
        if output_bboxes_mask:
            optional['''bounding_boxes'''] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra} | 353 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    '''simple docstring'''
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''vision'''] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors='''pt''' )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 188 | 0 |
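# A usage sketch for the tool above. decode() thresholds the CLIPSeg logits at zero,
# so the returned PIL image is binary: white where the label was detected. The input
# path is illustrative.
from PIL import Image

tool = ImageSegmentationTool()
image = Image.open("cats.png")
mask = tool(image=image, label="cat")
mask.save("cat_mask.png")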
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>
Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_="run" ):
if prompt_or_repo_id is None:
lowerCAmelCase__ : List[str] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , A_ ) is not None:
return prompt_or_repo_id
lowerCAmelCase__ : str = cached_file(
A_ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(A_ , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
| 106 |
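# Both branches of download_prompt, sketched: a string containing whitespace is
# treated as a literal prompt and returned verbatim, anything else as a Hub
# dataset repo ID whose template file is fetched. The agent name is illustrative.
literal = download_prompt("Summarize: <<task>>", agent_name="my-agent")   # returned as-is
template = download_prompt(None, agent_name="my-agent", mode="chat")      # fetches chat_prompt_template.txt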
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = '''scheduler_config.json'''
class FlaxKarrasDiffusionSchedulers( Enum ):
    """simple docstring"""
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput( BaseOutput ):
    """simple docstring"""
    prev_sample : jnp.ndarray
class FlaxSchedulerMixin:
    """simple docstring"""
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Optional[str] = None , subfolder : Optional[str] = None , return_unused_kwargs : bool = False , **kwargs , ):
        config , kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , **kwargs , )
        scheduler , unused_kwargs = cls.from_config(config , return_unused_kwargs=True , **kwargs )
        if hasattr(scheduler , '''create_state''' ) and getattr(scheduler , '''has_state''' , False ):
            state = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained( self , save_directory : Union[str, os.PathLike] , push_to_hub : bool = False , **kwargs ):
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles( self ):
        return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls ):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split('''.''' )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
def broadcast_to_shape_from_left( x , shape ):
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , dtype=jnp.float32 ):
    def alpha_bar( time_step ):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2 ) / alpha_bar(t1 ) , max_beta ) )
    return jnp.array(betas , dtype=dtype )
@flax.struct.dataclass
class CommonSchedulerState:
    """simple docstring"""
    alphas : jnp.ndarray
    betas : jnp.ndarray
    alphas_cumprod : jnp.ndarray
@classmethod
    def create( cls , scheduler ):
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas , axis=0 )
        return cls(
            alphas=alphas , betas=betas , alphas_cumprod=alphas_cumprod , )
def get_sqrt_alpha_prod( state , original_samples , noise , timesteps ):
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common( state , original_samples , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common( state , sample , noise , timesteps ):
    sqrt_alpha_prod , sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 106 | 1 |
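# add_noise_common above is the closed-form forward diffusion process
#     x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise
# and get_velocity_common is the v-prediction target
#     v_t = sqrt(alphas_cumprod[t]) * noise - sqrt(1 - alphas_cumprod[t]) * x_0
# A small numeric check with a hypothetical linear beta schedule:
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 0.02, 1000)
alphas_cumprod = jnp.cumprod(1.0 - betas, axis=0)

t = jnp.array([10])
x0 = jnp.ones((1, 4))             # toy sample
noise = 0.5 * jnp.ones((1, 4))    # toy noise
sqrt_ap = alphas_cumprod[t] ** 0.5
sqrt_omap = (1.0 - alphas_cumprod[t]) ** 0.5
print(sqrt_ap * x0 + sqrt_omap * noise)    # x_t, same algebra as add_noise_common
print(sqrt_ap * noise - sqrt_omap * x0)    # v_t, same algebra as get_velocity_common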
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
_import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360 |
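# The lazy table above defers the heavy torch/vision imports until first attribute
# access. A short zero-shot detection sketch, assuming the public
# google/owlvit-base-patch32 checkpoint; the input path is illustrative.
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open("cats.png")
inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, threshold=0.1, target_sizes=torch.tensor([image.size[::-1]])
)
print(results[0]["boxes"], results[0]["scores"])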
"""simple docstring"""
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    def __init__( self , group : int = 14 ) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key( self ) -> str:
        return hex(self.__private_key )[2:]
    def generate_public_key( self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key( self , key : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key( self , other_key_str : str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static( remote_public_key_str : int , prime : int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86 | 0 |
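# End-to-end sketch of the class above: both parties use the same RFC 3526 group
# and independently arrive at the same SHA-256 digest of the shared secret.
alice = DiffieHellman(group=14)
bob = DiffieHellman(group=14)
alice_shared = alice.generate_shared_key(bob.generate_public_key())
bob_shared = bob.generate_shared_key(alice.generate_public_key())
assert alice_shared == bob_shared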
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax( nn.Module ):
"""simple docstring"""
    def __init__( self , n_token , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False ):
        """simple docstring"""
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters ) )
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_embed ) ) )
                else:
                    self.out_projs.append(None )
            self.out_layers.append(nn.Linear(d_embed , n_token ) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj , d_emb_i ) ) )
                self.out_layers.append(nn.Linear(d_emb_i , r_idx - l_idx ) )
        self.keep_order = keep_order
    def _compute_logit( self , hidden , weight , bias , proj ):
        """simple docstring"""
        if proj is None:
            logit = nn.functional.linear(hidden , weight , bias=bias )
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden , proj.t().contiguous() )
            logit = nn.functional.linear(proj_hid , weight , bias=bias )
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward( self , hidden , labels=None , keep_order=False ):
        """simple docstring"""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1 , hidden.size(-1 ) )
            labels = labels.view(-1 )
            if hidden.size(0 ) != labels.size(0 ):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
        else:
            hidden = hidden.view(-1 , hidden.size(-1 ) )
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
                out[mask] = (
                    -nn.functional.log_softmax(logit , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
                )
            else:
                out = nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            if labels is None:
                out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            else:
                out = torch.zeros_like(labels , dtype=hidden.dtype , device=hidden.device )
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                l_idx , r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0 , indices_i ) - l_idx
                    head_logprob_i = head_logprob.index_select(0 , indices_i )
                    hidden_i = hidden.index_select(0 , indices_i )
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1 , target_i[:, None] ).squeeze(1 )
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
                        out.index_copy_(0 , indices_i , -logprob_i )
                    else:
                        out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
                    offset += logprob_i.size(0 )
        return out
    def log_prob( self , hidden ):
        """simple docstring"""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
| 28 |
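# The cutoff bookkeeping in ProjectedAdaptiveLogSoftmax is easier to see on concrete
# (hypothetical) numbers: with n_token=10_000 and cutoffs=[1000, 4000], the head holds
# the 1000 most frequent ids plus one logit per tail cluster, and each tail uses a
# narrower projection (d_embed // div_val**i).
cutoffs = [1000, 4000]
n_token = 10_000
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    print(f"cluster {i}: token ids [{l_idx}, {r_idx}), size {r_idx - l_idx}")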
from functools import lru_cache
@lru_cache
def __UpperCamelCase ( _A ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 278 | 0 |
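# The bare @lru_cache decorator (no parentheses) requires Python 3.8+. Every
# intermediate value is cached on the way down, so later calls with smaller
# arguments are O(1); deep inputs are still bounded by the recursion limit (~1000).
factorial(500)                 # caches factorial(0) .. factorial(500)
print(factorial(400))          # answered straight from the cache, no recursion
print(factorial.cache_info())  # hits/misses counters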
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Optional[int] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
snake_case_ : Dict = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_A )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Tuple = 'sshleifer/tiny-gpt2'
snake_case_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , )
snake_case_ : Any = TensorFlowBenchmark(_A )
snake_case_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : int = 'sgugger/tiny-distilbert-classification'
snake_case_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , )
snake_case_ : List[str] = TensorFlowBenchmark(_A )
snake_case_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = 'sshleifer/tiny-gpt2'
snake_case_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_A )
snake_case_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
snake_case_ : List[str] = 'sshleifer/tiny-gpt2'
snake_case_ : str = AutoConfig.from_pretrained(_A )
snake_case_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , )
snake_case_ : int = TensorFlowBenchmark(_A , [config] )
snake_case_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = 'sshleifer/tiny-gpt2'
snake_case_ : List[Any] = AutoConfig.from_pretrained(_A )
snake_case_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
snake_case_ : List[Any] = TensorFlowBenchmark(_A , [config] )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : List[Any] = 'sshleifer/tiny-gpt2'
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
snake_case_ : List[Any] = TensorFlowBenchmark(_A )
snake_case_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : str = 'sshleifer/tiny-gpt2'
snake_case_ : Any = AutoConfig.from_pretrained(_A )
snake_case_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
snake_case_ : List[Any] = TensorFlowBenchmark(_A , [config] )
snake_case_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = 'patrickvonplaten/t5-tiny-random'
snake_case_ : Optional[Any] = AutoConfig.from_pretrained(_A )
snake_case_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
snake_case_ : Tuple = TensorFlowBenchmark(_A , configs=[config] )
snake_case_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def UpperCAmelCase_ ( self : str ) -> Any:
"""simple docstring"""
snake_case_ : Union[str, Any] = 'sshleifer/tiny-gpt2'
snake_case_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_A , multi_process=_A , )
snake_case_ : Any = TensorFlowBenchmark(_A )
snake_case_ : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase_ ( self : str ) -> Tuple:
"""simple docstring"""
snake_case_ : List[str] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(_A , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(_A , 'env.csv' ) , multi_process=_A , )
snake_case_ : Union[str, Any] = TensorFlowBenchmark(_A )
benchmark.run()
self.assertTrue(Path(os.path.join(_A , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'env.csv' ) ).exists() )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_A : Any ):
self.assertTrue(hasattr(_A , 'sequential' ) )
self.assertTrue(hasattr(_A , 'cumulative' ) )
self.assertTrue(hasattr(_A , 'current' ) )
self.assertTrue(hasattr(_A , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , 'log.txt' ) , log_print=_A , trace_memory_line_by_line=_A , eager_mode=_A , multi_process=_A , )
snake_case_ : int = TensorFlowBenchmark(_A )
snake_case_ : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_A , 'log.txt' ) ).exists() )
| 88 |
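# A minimal standalone sketch of the benchmark API exercised by the tests above,
# assuming the TensorFlowBenchmark utilities still ship with transformers:
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)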
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""")
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    print("""Enter how many process you want to analyze""")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            """Process""",
            """BurstTime""",
            """ArrivalTime""",
            """WaitingTime""",
            """TurnAroundTime""",
        ],
    )
    # Printing the dataFrame
    pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
    print(fcfs)
| 88 | 1 |
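# Non-interactive usage of the SRTF helpers above on a toy workload
# (values are illustrative; no specific output is asserted):
arrival = [0, 1, 2, 3]
burst = [8, 4, 9, 5]
n = len(arrival)
wt = calculate_waitingtime(arrival, burst, n)
tat = calculate_turnaroundtime(burst, n, wt)
calculate_average_times(wt, tat, n)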
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a = logging.getLogger(__name__)
def _snake_case ( _snake_case : torch.nn.Module , _snake_case : BnbQuantizationConfig , _snake_case : Union[str, os.PathLike] = None , _snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , _snake_case : Optional[List[str]] = None , _snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , ) -> int:
'''simple docstring'''
_A = bnb_quantization_config.load_in_abit
_A = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
_A = []
# custom device map
if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1:
_A = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
_A = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
_A = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
_A = []
_A = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
_A = load_in_abit
_A = load_in_abit
_A = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
_A = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case )
# convert param to the right dtype
_A = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
_A = name.replace('.weight' , '' ).replace('.bias' , '' )
_A = getattr(_snake_case , _snake_case , _snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
_A = replace_with_bnb_layers(
_snake_case , _snake_case , modules_to_not_convert=_snake_case )
_A = get_quantized_model_device_map(
_snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
_A = True
_A = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
_snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case )
def _snake_case ( _snake_case : List[str] , _snake_case : List[str] , _snake_case : Any=None , _snake_case : Dict=None , _snake_case : int=None ) -> Any:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
_A = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(_snake_case , _snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
_A = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
_A = {}
_A = special_dtypes
_A = no_split_module_classes
_A = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
_A = get_balanced_memory(
_snake_case , low_zero=(device_map == 'balanced_low_0') , max_memory=_snake_case , **_snake_case , )
_A = max_memory
_A = infer_auto_device_map(_snake_case , **_snake_case )
if isinstance(_snake_case , _snake_case ):
# check if don't have any quantized module on the cpu
_A = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
_A = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Any , _snake_case : Union[str, Any]=None , _snake_case : Tuple=None ) -> Tuple:
'''simple docstring'''
if modules_to_not_convert is None:
_A = []
_A , _A = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _snake_case ( _snake_case : int , _snake_case : str , _snake_case : str=None , _snake_case : Optional[int]=None , ) -> str:
'''simple docstring'''
_A = False
for name, module in model.named_children():
if current_key_name is None:
_A = []
current_key_name.append(_snake_case )
if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
_A = '.'.join(_snake_case )
_A = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
_A = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
_A = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
_A = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
_A = module.weight.data
if module.bias is not None:
_A = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case , _snake_case , _snake_case )
_A = True
if len(list(module.children() ) ) > 0:
_A , _A = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
_A = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case ( _snake_case : Any ) -> Optional[int]:
'''simple docstring'''
with init_empty_weights():
_A = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
_A = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case , _snake_case ):
_A = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
_A = sum(_snake_case , [] )
_A = len(_snake_case ) > 0
# Check if it is a base model
_A = False
if hasattr(_snake_case , 'base_model_prefix' ):
_A = not hasattr(_snake_case , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
_A = list(model.named_children() )
_A = [list_modules[-1][0]]
# add last module together with tied weights
_A = set(_snake_case ) - set(_snake_case )
_A = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
_A = ['.weight', '.bias']
_A = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
_A = name.replace(_snake_case , '' )
filtered_module_names.append(_snake_case )
return filtered_module_names
def has_4bit_bnb_layers(model):
    """Check whether the model contains any `bnb.nn.Linear4bit` layer."""
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    """Quantize a weight on device 0, offload it (with its `SCB` statistics) to disk, then free it to `meta`."""
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
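A minimal usage sketch of the helpers above. Assumptions: `BnbQuantizationConfig` is the config class from `accelerate.utils`, the toy model is purely illustrative, and actually materializing quantized weights still requires a CUDA-enabled bitsandbytes install.

import torch.nn as nn
from accelerate.utils import BnbQuantizationConfig

toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
config = BnbQuantizationConfig(load_in_8bit=True)
keep_in_fp32 = get_keys_to_not_convert(toy)  # modules (tied weights / head) to leave unquantized
toy, replaced = _replace_with_bnb_layers(toy, config, modules_to_not_convert=keep_in_fp32)
print(replaced)  # True once at least one nn.Linear has been swapped for bnb.nn.Linear8bitLt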
| 315 |
"""simple docstring"""
def _snake_case ( _snake_case : int = 10_00 ) -> int:
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
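A quick hand check by expanding the first two terms: a = 3 contributes 2 * 3 * ((3 - 1) // 2) = 6 and a = 4 contributes 2 * 4 * ((4 - 1) // 2) = 8.

assert solution(4) == 14  # 6 + 8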
| 315 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
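The `_LazyModule` registration above defers the heavy torch/speech imports until a symbol is first accessed. A simplified sketch of the mechanism follows; `MiniLazyModule` is a hypothetical minimal reimplementation, not the real `transformers.utils._LazyModule`, which handles many more cases.

import importlib
import types

class MiniLazyModule(types.ModuleType):
    """Resolve exported attributes to their defining submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map exported attribute -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)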
| 121 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    """A* search over `grid`; returns the found path and the action grid."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()  # after sort + reverse, pop() returns the cell with the smallest f
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
lowerCAmelCase__ = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCAmelCase__ = [0, 0]
# all coordinates are given in format [y,x]
lowerCAmelCase__ = [len(grid) - 1, len(grid[0]) - 1]
lowerCAmelCase__ = 1
# the cost map which pushes the path closer to the goal
lowerCAmelCase__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCAmelCase__ = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCAmelCase__ = 9_9
lowerCAmelCase__ , lowerCAmelCase__ = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
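For intuition, a hand-traceable call on a one-row grid (traced against the loop above: the single expansion uses direction index 3, i.e. [0, 1]):

tiny_path, tiny_action = search([[0, 0]], [0, 0], [0, 1], 1, [[1, 0]])
assert tiny_path == [[0, 0], [0, 1]]
assert tiny_action == [[0, 3]]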
| 121 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase : str = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 291 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCamelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
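Outside the test harness, the pipeline under test is driven the same way; the expected output below is exactly what test_small_model_pt asserts for this tiny random checkpoint.

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
print(generator("Something there", do_sample=False))  # [{'generated_text': ''}]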
| 291 | 1 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
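A short usage sketch, assuming a transformers install that ships the DeiT classes (the model here is randomly initialized, not pretrained):

from transformers import DeiTConfig, DeiTModel

config = DeiTConfig()  # defaults: hidden_size=768, image_size=224, patch_size=16
model = DeiTModel(config)
print(config.hidden_size)  # 768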
| 13 |
import os

import numpy
import onnx


def _is_equal_tensor_proto(a, b):
    """Compare two TensorProtos for equality, ignoring their names."""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Remove duplicate initializers from an ONNX model and save the optimized copy next to it."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4  # float32
                elif dtype == 6:
                    mem_size *= 4  # int32
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8  # int64 / double
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
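A sketch of invoking the deduplication pass (the input path is hypothetical; the optimized copy is written next to it):

optimized = remove_dup_initializers("exported/model.onnx")
print(optimized)  # exported/optimized_model.onnx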
| 13 | 1 |
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Point the model-list links in the README at the stable docs instead of `main`."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version from the package `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
_A : Any = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_A : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
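A quick demonstration of what the `init` pattern rewrites, runnable in this module's namespace (the version strings are illustrative):

pattern, template = REPLACE_PATTERNS["init"]
code = '__version__ = "0.19.0.dev0"\n'
print(pattern.sub(template.replace("VERSION", "0.19.0"), code), end="")  # __version__ = "0.19.0"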
| 142 | 0 |
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Turn ["--key", "value", ...] pairs into a {"key": "value"} dict."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
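A hand check of the flag-parsing helper above: even-indexed tokens are treated as flags, odd-indexed tokens as their values.

assert parse_unknown_args(["--name", "squad", "--cache_dir", "/tmp"]) == {"name": "squad", "cache_dir": "/tmp"}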
| 357 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
| 37 | 0 |