code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : str = ["""input_ids""", """attention_mask"""] _A : Tuple = MBartTokenizer _A : List[int] = [] _A : List[int] = [] def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , ) snake_case_ : Dict = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) snake_case_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX""" snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase (self ): return self._src_lang @src_lang.setter def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) snake_case_ : int = src_lang snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ): snake_case_ : List[str] = src_lang snake_case_ : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase (self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Tuple = [] snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return snake_case_ : List[str] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
48
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
48
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = DiTPipeline _A : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS _A : Any = PipelineTesterMixin.required_optional_params - { """latents""", """num_images_per_prompt""", """callback""", """callback_steps""", } _A : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS _A : Tuple = False def __UpperCamelCase (self ): torch.manual_seed(0 ) snake_case_ : List[str] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowercase__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=10_00 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowercase__ , ) snake_case_ : Optional[int] = AutoencoderKL() snake_case_ : Dict = DDIMScheduler() snake_case_ : int = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def __UpperCamelCase (self , lowercase__ , lowercase__=0 ): if str(lowercase__ ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(lowercase__ ) else: snake_case_ : Any = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) snake_case_ : List[Any] = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def __UpperCamelCase (self ): snake_case_ : Tuple = """cpu""" snake_case_ : Optional[int] = self.get_dummy_components() snake_case_ : Optional[int] = self.pipeline_class(**lowercase__ ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) snake_case_ : Optional[Any] = self.get_dummy_inputs(lowercase__ ) snake_case_ : Union[str, Any] = pipe(**lowercase__ ).images snake_case_ : Dict = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) snake_case_ : int = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) snake_case_ : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase__ , 1e-3 ) def __UpperCamelCase (self ): self._test_inference_batch_single_identical(relax_max_difference=lowercase__ , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __UpperCamelCase (self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase (self ): snake_case_ : str = torch.manual_seed(0 ) snake_case_ : Tuple = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) snake_case_ : Union[str, Any] = ["""vase""", """umbrella""", """white shark""", """white wolf"""] snake_case_ : Tuple = 
pipe.get_label_ids(lowercase__ ) snake_case_ : Optional[Any] = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowercase__ , lowercase__ ): snake_case_ : Any = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1e-2 def __UpperCamelCase (self ): snake_case_ : Tuple = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) snake_case_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) snake_case_ : Any = ["""vase""", """umbrella"""] snake_case_ : Union[str, Any] = pipe.get_label_ids(lowercase__ ) snake_case_ : Tuple = torch.manual_seed(0 ) snake_case_ : Union[str, Any] = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowercase__ , lowercase__ ): snake_case_ : Optional[int] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1e-1
48
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = ["""image_processor""", """tokenizer"""] _A : str = """ChineseCLIPImageProcessor""" _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase__ , ) snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ , lowercase__ ) snake_case_ : Union[str, Any] = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.tokenizer.model_input_names snake_case_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , ) return self.image_processor_class
48
1
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if "img_encoder.pos_embed" in name: snake_case_ : Optional[Any] = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" ) if "img_encoder.patch_embed.proj" in name: snake_case_ : List[Any] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" ) if "img_encoder.patch_embed.norm" in name: snake_case_ : Union[str, Any] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" ) if "img_encoder.layers" in name: snake_case_ : List[Any] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" ) if "blocks" in name and "res" not in name: snake_case_ : Dict = name.replace("""blocks""" , """layers""" ) if "attn" in name and "pre_assign" not in name: snake_case_ : Union[str, Any] = name.replace("""attn""" , """self_attn""" ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ : Union[str, Any] = name.replace("""proj""" , """out_proj""" ) if "pre_assign_attn.attn.proj" in name: snake_case_ : Dict = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" ) if "norm1" in name: snake_case_ : Optional[Any] = name.replace("""norm1""" , """layer_norm1""" ) if "norm2" in name and "pre_assign" not in name: snake_case_ : int = name.replace("""norm2""" , """layer_norm2""" ) if "img_encoder.norm" in name: snake_case_ : Optional[Any] = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ : int = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" ) if "text_encoder.positional_embedding" in name: snake_case_ : Tuple = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "text_encoder.transformer.resblocks." in name: snake_case_ : str = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" ) if "ln_1" in name: snake_case_ : Optional[Any] = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: snake_case_ : int = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: snake_case_ : Union[str, Any] = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: snake_case_ : Union[str, Any] = name.replace("""c_proj""" , """fc2""" ) if "text_encoder" in name: snake_case_ : Optional[int] = name.replace("""text_encoder""" , """text_model""" ) if "ln_final" in name: snake_case_ : Any = name.replace("""ln_final""" , """final_layer_norm""" ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ : List[str] = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" ) if "img_projector.linear_out." 
in name: snake_case_ : int = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" ) if "text_projector.linear_hidden" in name: snake_case_ : Dict = name.replace("""text_projector.linear_hidden""" , """text_projection""" ) if "text_projector.linear_out" in name: snake_case_ : Dict = name.replace("""text_projector.linear_out""" , """text_projection.3""" ) return name def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" for key in orig_state_dict.copy().keys(): snake_case_ : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ : Dict = key.split(""".""" ) snake_case_ , snake_case_ : Tuple = int(key_split[2] ), int(key_split[4] ) snake_case_ : List[Any] = config.vision_config.hidden_size if "weight" in key: snake_case_ : List[str] = val[:dim, :] snake_case_ : Union[str, Any] = val[dim : dim * 2, :] snake_case_ : Tuple = val[-dim:, :] else: snake_case_ : Optional[int] = val[:dim] snake_case_ : Optional[Any] = val[dim : dim * 2] snake_case_ : Union[str, Any] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ : Any = key.split(""".""" ) snake_case_ : Optional[Any] = int(key_split[3] ) snake_case_ : Optional[int] = config.text_config.hidden_size if "weight" in key: snake_case_ : Tuple = val[:dim, :] snake_case_ : Optional[Any] = val[ dim : dim * 2, : ] snake_case_ : Dict = val[-dim:, :] else: snake_case_ : Optional[int] = val[:dim] snake_case_ : Any = val[dim : dim * 2] snake_case_ : str = val[-dim:] else: snake_case_ : str = rename_key(SCREAMING_SNAKE_CASE__ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ : str = val.squeeze_() else: snake_case_ : Dict = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ : Tuple = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int]="groupvit-gcc-yfcc" , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): """simple docstring""" snake_case_ : Any = GroupViTConfig() snake_case_ : str = GroupViTModel(SCREAMING_SNAKE_CASE__ ).eval() snake_case_ : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )["""model"""] snake_case_ : Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : int = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(SCREAMING_SNAKE_CASE__ ) == 0) # verify result snake_case_ : str = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" ) snake_case_ : int = prepare_img() snake_case_ : Optional[int] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , 
images=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) with torch.no_grad(): snake_case_ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ ) if model_name == "groupvit-gcc-yfcc": snake_case_ : Union[str, Any] = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ : List[Any] = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print("""Successfully saved processor and model to""" , SCREAMING_SNAKE_CASE__ ) if push_to_hub: print("""Pushing to the hub...""" ) processor.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" ) model.push_to_hub(SCREAMING_SNAKE_CASE__ , organization="""nielsr""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) a_ = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
48
"""simple docstring""" import argparse import copy def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case_ : int = [] _list.append([line.split()[1], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case_ : str = [] _list.append([line.split()[0], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ ) as f: snake_case_ : Optional[Any] = f.read(1 ) snake_case_ : Union[str, Any] = start_node snake_case_ : Dict = [] snake_case_ : Union[str, Any] = start_node snake_case_ : Tuple = 0 while visiting not in first_solution: snake_case_ : int = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution: snake_case_ : Union[str, Any] = k[1] snake_case_ : Any = k[0] first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = best_node first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case_ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = [] for n in solution[1:-1]: snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ ) for kn in solution[1:-1]: snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ ) if n == kn: continue snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = kn snake_case_ : Dict = n snake_case_ : Optional[int] = 0 for k in _tmp[:-1]: snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case_ : Dict = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = 1 snake_case_ : List[Any] = first_solution snake_case_ : List[Any] = [] snake_case_ : Optional[Any] = distance_of_first_solution snake_case_ : Dict = solution while count <= iters: snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = 0 snake_case_ : List[Any] = neighborhood[index_of_best_solution] snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 snake_case_ : List[str] = False while 
not found: snake_case_ : Tuple = 0 while i < len(SCREAMING_SNAKE_CASE__ ): if best_solution[i] != solution[i]: snake_case_ : Optional[Any] = best_solution[i] snake_case_ : int = solution[i] break snake_case_ : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case_ : Tuple = True snake_case_ : Dict = best_solution[:-1] snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case_ : Tuple = cost snake_case_ : Union[str, Any] = solution else: snake_case_ : str = index_of_best_solution + 1 snake_case_ : Tuple = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE__ ) >= size: tabu_list.pop(0 ) snake_case_ : List[str] = count + 1 return best_solution_ever, best_cost def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): """simple docstring""" snake_case_ : Tuple = generate_neighbours(args.File ) snake_case_ , snake_case_ : Optional[Any] = generate_first_solution( args.File , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : Dict = tabu_search( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
48
1
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int=None ): """simple docstring""" snake_case_ : Tuple = None if token is not None: snake_case_ : Optional[Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'} snake_case_ : Any = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' snake_case_ : int = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json() snake_case_ : List[str] = {} try: job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) snake_case_ : Union[str, Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(SCREAMING_SNAKE_CASE__ ): snake_case_ : Dict = requests.get(url + f'&page={i + 2}' , headers=SCREAMING_SNAKE_CASE__ ).json() job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return job_links except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int=None ): """simple docstring""" snake_case_ : Optional[Any] = None if token is not None: snake_case_ : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'} snake_case_ : int = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100' snake_case_ : Optional[int] = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ ).json() snake_case_ : int = {} try: artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) snake_case_ : Any = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(SCREAMING_SNAKE_CASE__ ): snake_case_ : int = requests.get(url + f'&page={i + 2}' , headers=SCREAMING_SNAKE_CASE__ ).json() artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} ) return artifacts except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Optional[Any] = None if token is not None: snake_case_ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'Bearer {token}'} snake_case_ : List[str] = requests.get(SCREAMING_SNAKE_CASE__ , headers=SCREAMING_SNAKE_CASE__ , allow_redirects=SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = result.headers["""Location"""] snake_case_ : Tuple = requests.get(SCREAMING_SNAKE_CASE__ , allow_redirects=SCREAMING_SNAKE_CASE__ ) snake_case_ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , f'{artifact_name}.zip' ) with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fp: fp.write(response.content ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any]=None ): """simple docstring""" snake_case_ : str = [] snake_case_ : Optional[int] = [] snake_case_ : Union[str, Any] = None with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ ) as z: for filename in z.namelist(): if not 
os.path.isdir(SCREAMING_SNAKE_CASE__ ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: snake_case_ : Union[str, Any] = line.decode("""UTF-8""" ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs snake_case_ : str = line[: line.index(""": """ )] snake_case_ : List[str] = line[line.index(""": """ ) + len(""": """ ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("""FAILED """ ): # `test` is the test method that failed snake_case_ : int = line[len("""FAILED """ ) :] failed_tests.append(SCREAMING_SNAKE_CASE__ ) elif filename == "job_name.txt": snake_case_ : Tuple = line if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ): raise ValueError( f'`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE__ )} for `errors` ' f'and {len(SCREAMING_SNAKE_CASE__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some' """ problem.""" ) snake_case_ : Optional[int] = None if job_name and job_links: snake_case_ : Any = job_links.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # A list with elements of the form (line of error, error, failed test) snake_case_ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] return result def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=None ): """simple docstring""" snake_case_ : List[Any] = [] snake_case_ : int = [os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for p in os.listdir(SCREAMING_SNAKE_CASE__ ) if p.endswith(""".zip""" )] for p in paths: errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE__ , job_links=SCREAMING_SNAKE_CASE__ ) ) return errors def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ): """simple docstring""" snake_case_ : Union[str, Any] = Counter() counter.update([x[1] for x in logs] ) snake_case_ : Tuple = counter.most_common() snake_case_ : str = {} for error, count in counts: if error_filter is None or error not in error_filter: snake_case_ : Tuple = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]} snake_case_ : str = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE__ ) ) return r def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : Union[str, Any] = test.split("""::""" )[0] if test.startswith("""tests/models/""" ): snake_case_ : Dict = test.split("""/""" )[2] else: snake_case_ : List[Any] = None return test def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict=None ): """simple docstring""" snake_case_ : Optional[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs] snake_case_ : Any = [x for x in logs if x[2] is not None] snake_case_ : List[str] = {x[2] for x in logs} snake_case_ : Union[str, Any] = {} for test in tests: snake_case_ : Tuple = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) snake_case_ : List[Any] = counter.most_common() snake_case_ : Dict = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} snake_case_ : List[str] = sum(error_counts.values() 
) if n_errors > 0: snake_case_ : Tuple = {"""count""": n_errors, """errors""": error_counts} snake_case_ : Union[str, Any] = dict(sorted(r.items() , key=lambda SCREAMING_SNAKE_CASE__ : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE__ ) ) return r def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = """| no. | error | status |""" snake_case_ : int = """|-:|:-|:-|""" snake_case_ : str = [header, sep] for error in reduced_by_error: snake_case_ : List[str] = reduced_by_error[error]["""count"""] snake_case_ : List[str] = f'| {count} | {error[:1_0_0]} | |' lines.append(SCREAMING_SNAKE_CASE__ ) return "\n".join(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = """| model | no. of errors | major error | count |""" snake_case_ : List[Any] = """|-:|-:|-:|-:|""" snake_case_ : int = [header, sep] for model in reduced_by_model: snake_case_ : List[str] = reduced_by_model[model]["""count"""] snake_case_ , snake_case_ : Union[str, Any] = list(reduced_by_model[model]["""errors"""].items() )[0] snake_case_ : Tuple = f'| {model} | {count} | {error[:6_0]} | {_count} |' lines.append(SCREAMING_SNAKE_CASE__ ) return "\n".join(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') a_ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) a_ = get_job_links(args.workflow_run_id, token=args.token) a_ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: a_ = k.find(''' / ''') a_ = k[index + len(''' / ''') :] a_ = v with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) a_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) a_ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error a_ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors a_ = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) a_ = reduce_by_error(errors) a_ = reduce_by_model(errors) a_ = make_github_table(reduced_by_error) a_ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa) with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa)
48
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings a_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """rag""" _A : Optional[Any] = True def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" snake_case_ : List[Any] = kwargs.pop("""question_encoder""" ) snake_case_ : Tuple = question_encoder_config.pop("""model_type""" ) snake_case_ : List[str] = kwargs.pop("""generator""" ) snake_case_ : List[str] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : int = reduce_loss snake_case_ : Optional[int] = label_smoothing snake_case_ : Dict = exclude_bos_score snake_case_ : Union[str, Any] = do_marginalize snake_case_ : Union[str, Any] = title_sep snake_case_ : int = doc_sep snake_case_ : int = n_docs snake_case_ : List[str] = max_combined_length snake_case_ : Tuple = dataset snake_case_ : int = dataset_split snake_case_ : str = index_name snake_case_ : List[str] = retrieval_vector_size snake_case_ : Dict = retrieval_batch_size snake_case_ : str = passages_path snake_case_ : Union[str, Any] = index_path snake_case_ : Tuple = use_dummy_dataset snake_case_ : Dict = output_retrieved snake_case_ : str = do_deduplication snake_case_ : Any = use_cache if self.forced_eos_token_id is None: snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ ) @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.question_encoder.to_dict() snake_case_ : Dict = self.generator.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
48
1
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a_ = get_logger(__name__) a_ = Path(__file__).parent / '''model_card_template.md''' a_ = uuida().hex a_ = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES a_ = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES a_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[Dict, str, None] = None ): """simple docstring""" snake_case_ : List[Any] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): """simple docstring""" if token is None: snake_case_ : Optional[Any] = HfFolder.get_token() if organization is None: snake_case_ : Tuple = whoami(SCREAMING_SNAKE_CASE__ )["""name"""] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""" ) and args.local_rank not in [-1, 0]: return snake_case_ : Optional[int] = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""" ) else None snake_case_ : Union[str, Any] = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , 
repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""" ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""" ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""" ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""" ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""" ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""" ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""" ) else None , mixed_precision=args.mixed_precision , ) snake_case_ : Tuple = os.path.join(args.output_dir , """README.md""" ) model_card.save(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[str] , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): """simple docstring""" if resolved_file is None or commit_hash is not None: return commit_hash snake_case_ : Optional[int] = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() ) snake_case_ : List[str] = re.search(R"""snapshots/([^/]+)/""" , SCREAMING_SNAKE_CASE__ ) if search is None: return None snake_case_ : Union[str, Any] = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. a_ = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) a_ = os.path.join(hf_cache_home, '''diffusers''') def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): """simple docstring""" if new_cache_dir is None: snake_case_ : Tuple = DIFFUSERS_CACHE if old_cache_dir is None: snake_case_ : List[Any] = old_diffusers_cache snake_case_ : List[str] = Path(SCREAMING_SNAKE_CASE__ ).expanduser() snake_case_ : Union[str, Any] = Path(SCREAMING_SNAKE_CASE__ ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): snake_case_ : Union[str, Any] = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) try: os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. 
If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). a_ = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): a_ = 0 else: with open(cache_version_file) as f: try: a_ = int(f.read()) except ValueError: a_ = 0 if cache_version < 1: a_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: a_ = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' '''the directory exists and can be written to.''' ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): """simple docstring""" if variant is not None: snake_case_ : int = weights_name.split(""".""" ) snake_case_ : Dict = splits[:-1] + [variant] + splits[-1:] snake_case_ : int = """.""".join(SCREAMING_SNAKE_CASE__ ) return weights_name def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , *, SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int=None , ): """simple docstring""" snake_case_ : int = str(SCREAMING_SNAKE_CASE__ ) if os.path.isfile(SCREAMING_SNAKE_CASE__ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE__ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): # Load from a PyTorch checkpoint snake_case_ : str = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): snake_case_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse("""0.20.0""" ) ): try: snake_case_ : Tuple = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , SCREAMING_SNAKE_CASE__ , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.' , SCREAMING_SNAKE_CASE__ , ) try: # 2. Load model file as usual snake_case_ : List[str] = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' """this model name. Check the model page at """ f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' 
) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
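# A minimal sketch of the variant-naming rule used by the loader above: the
# variant string is spliced in just before the file extension. The name
# `add_variant` is illustrative, not the library's public API.
from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"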
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """upernet""" def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ): super().__init__(**lowercase__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): snake_case_ : Tuple = backbone_config.get("""model_type""" ) snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[Any] = config_class.from_dict(lowercase__ ) snake_case_ : List[Any] = backbone_config snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = initializer_range snake_case_ : str = pool_scales snake_case_ : Dict = use_auxiliary_head snake_case_ : str = auxiliary_loss_weight snake_case_ : List[str] = auxiliary_in_channels snake_case_ : Optional[Any] = auxiliary_channels snake_case_ : Any = auxiliary_num_convs snake_case_ : List[Any] = auxiliary_concat_input snake_case_ : List[str] = loss_ignore_index def __UpperCamelCase (self ): snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : Any = self.__class__.model_type return output
"""simple docstring""" from typing import Any def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict , SCREAMING_SNAKE_CASE__ : dict , ): """simple docstring""" _validation( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) # Creates data structures and fill initial step snake_case_ : dict = {} snake_case_ : dict = {} for state in states_space: snake_case_ : str = observations_space[0] snake_case_ : Union[str, Any] = ( initial_probabilities[state] * emission_probabilities[state][observation] ) snake_case_ : int = None # Fills the data structure with the probabilities of # different transitions and pointers to previous states for o in range(1 , len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ : Union[str, Any] = observations_space[o] snake_case_ : Tuple = observations_space[o - 1] for state in states_space: # Calculates the argmax for probability function snake_case_ : Tuple = """""" snake_case_ : Any = -1 for k_state in states_space: snake_case_ : Optional[Any] = ( probabilities[(k_state, prior_observation)] * transition_probabilities[k_state][state] * emission_probabilities[state][observation] ) if probability > max_probability: snake_case_ : Optional[Any] = probability snake_case_ : str = k_state # Update probabilities and pointers dicts snake_case_ : Optional[Any] = ( probabilities[(arg_max, prior_observation)] * transition_probabilities[arg_max][state] * emission_probabilities[state][observation] ) snake_case_ : Any = arg_max # The final observation snake_case_ : Dict = observations_space[len(SCREAMING_SNAKE_CASE__ ) - 1] # argmax for given final observation snake_case_ : str = """""" snake_case_ : Tuple = -1 for k_state in states_space: snake_case_ : int = probabilities[(k_state, final_observation)] if probability > max_probability: snake_case_ : Dict = probability snake_case_ : List[Any] = k_state snake_case_ : Optional[int] = arg_max # Process pointers backwards snake_case_ : str = last_state snake_case_ : Dict = [] for o in range(len(SCREAMING_SNAKE_CASE__ ) - 1 , -1 , -1 ): result.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = pointers[previous, observations_space[o]] result.reverse() return result def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , ): """simple docstring""" _validate_not_empty( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) _validate_lists(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) _validate_dicts( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , ): """simple docstring""" if not all( [ observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, ] ): raise ValueError("""There's an empty parameter""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" _validate_list(SCREAMING_SNAKE_CASE__ , """observations_space""" ) _validate_list(SCREAMING_SNAKE_CASE__ , """states_space""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , 
SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" if not isinstance(_object , SCREAMING_SNAKE_CASE__ ): snake_case_ : Optional[int] = f'{var_name} must be a list' raise ValueError(SCREAMING_SNAKE_CASE__ ) else: for x in _object: if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : Dict = f'{var_name} must be a list of strings' raise ValueError(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , ): """simple docstring""" _validate_dict(SCREAMING_SNAKE_CASE__ , """initial_probabilities""" , SCREAMING_SNAKE_CASE__ ) _validate_nested_dict(SCREAMING_SNAKE_CASE__ , """transition_probabilities""" ) _validate_nested_dict(SCREAMING_SNAKE_CASE__ , """emission_probabilities""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" _validate_dict(_object , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for x in _object.values(): _validate_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : type , SCREAMING_SNAKE_CASE__ : bool = False ): """simple docstring""" if not isinstance(_object , SCREAMING_SNAKE_CASE__ ): snake_case_ : Optional[Any] = f'{var_name} must be a dict' raise ValueError(SCREAMING_SNAKE_CASE__ ) if not all(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for x in _object ): snake_case_ : Optional[Any] = f'{var_name} all keys must be strings' raise ValueError(SCREAMING_SNAKE_CASE__ ) if not all(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for x in _object.values() ): snake_case_ : Optional[Any] = """nested dictionary """ if nested else """""" snake_case_ : int = f'{var_name} {nested_text}all values must be {value_type.__name__}' raise ValueError(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": from doctest import testmod testmod()
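# Hedged usage example for the Viterbi decoder above, written against its
# original (un-mangled) name `viterbi` and argument order
# (observations_space, states_space, initial, transition, emission): the
# classic healthy/fever HMM.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, initial, transition, emission)
# -> ["Healthy", "Healthy", "Fever"]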
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__=-1 ): # in NER datasets, the last column is usually reserved for NER label snake_case_ : Union[str, Any] = label_idx def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[str] = mode.value snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : Any = [] with open(lowercase__ , encoding="""utf-8""" ) as f: snake_case_ : str = [] snake_case_ : List[Any] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 snake_case_ : Optional[Any] = [] snake_case_ : int = [] else: snake_case_ : Optional[Any] = line.split(""" """ ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(lowercase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Dict = f.read().splitlines() if "O" not in labels: snake_case_ : List[Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Any = f.read().splitlines() if "O" not in labels: snake_case_ : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[Any] = mode.value snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : str = [] with open(lowercase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(lowercase__ ): snake_case_ : Tuple = [] snake_case_ : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , 
labels=lowercase__ ) ) guid_index += 1 return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = 0 for sentence in parse_incr(lowercase__ ): snake_case_ : int = preds_list[example_id] snake_case_ : Dict = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase__ ) example_id += 1 def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
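# Hedged illustration of the file format the NER reader above expects:
# one token per line, space-separated columns with the label in the last
# column, and blank lines or -DOCSTART- markers separating sentences.
conll_snippet = """EU B-ORG
rejects O
German B-MISC
call O
"""
for line in conll_snippet.splitlines():
    if line.startswith("-DOCSTART-") or line == "":
        continue
    splits = line.split(" ")
    print(splits[0], splits[-1])  # token, NER label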
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = True , lowercase__ = "arrow" , **lowercase__ , ): super().__init__( split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , ) snake_case_ : Dict = load_from_cache_file snake_case_ : List[str] = file_format snake_case_ : Optional[Any] = Spark( df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , ) def __UpperCamelCase (self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) snake_case_ : List[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=lowercase__ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Union[str, Any] = num - 1 snake_case_ : List[str] = 0 while s % 2 == 0: snake_case_ : str = s // 2 t += 1 for _ in range(5 ): snake_case_ : List[Any] = random.randrange(2 , num - 1 ) snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if v != 1: snake_case_ : int = 0 while v != (num - 1): if i == t - 1: return False else: snake_case_ : str = i + 1 snake_case_ : int = (v**2) % num return True def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if num < 2: return False snake_case_ : Dict = [ 2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1, 7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1, 2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ): """simple docstring""" while True: snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(SCREAMING_SNAKE_CASE__ ): return num if __name__ == "__main__": a_ = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = ["""vqvae"""] def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): super().__init__() self.register_modules(unet=lowercase__ , scheduler=lowercase__ , mel=lowercase__ , vqvae=lowercase__ ) def __UpperCamelCase (self ): return 50 if isinstance(self.scheduler , lowercase__ ) else 10_00 @torch.no_grad() def __call__(self , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = 0 , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__=True , ): snake_case_ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(lowercase__ ) snake_case_ : Optional[int] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: snake_case_ : List[str] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: snake_case_ : str = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowercase__ , device=self.device , ) snake_case_ : Tuple = noise snake_case_ : str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowercase__ , lowercase__ ) snake_case_ : Any = self.mel.audio_slice_to_image(lowercase__ ) snake_case_ : List[Any] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) snake_case_ : str = (input_image / 2_55) * 2 - 1 snake_case_ : List[Any] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: snake_case_ : Any = self.vqvae.encode(torch.unsqueeze(lowercase__ , 0 ) ).latent_dist.sample( generator=lowercase__ )[0] snake_case_ : str = self.vqvae.config.scaling_factor * input_images if start_step > 0: snake_case_ : List[str] = self.scheduler.add_noise(lowercase__ , lowercase__ , self.scheduler.timesteps[start_step - 1] ) snake_case_ : Union[str, Any] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) snake_case_ : Optional[int] = int(mask_start_secs * pixels_per_second ) snake_case_ : Union[str, Any] = int(mask_end_secs * pixels_per_second ) snake_case_ : List[Any] = self.scheduler.add_noise(lowercase__ , lowercase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowercase__ ): snake_case_ : List[str] = self.unet(lowercase__ , lowercase__ , lowercase__ )["""sample"""] else: snake_case_ : Optional[Any] = self.unet(lowercase__ , lowercase__ )["""sample"""] if isinstance(self.scheduler , lowercase__ ): snake_case_ : Optional[Any] = self.scheduler.step( model_output=lowercase__ , timestep=lowercase__ , sample=lowercase__ , eta=lowercase__ , generator=lowercase__ , )["""prev_sample"""] else: snake_case_ : Dict = self.scheduler.step( model_output=lowercase__ , 
timestep=lowercase__ , sample=lowercase__ , generator=lowercase__ , )["""prev_sample"""] if mask is not None: if mask_start > 0: snake_case_ : Union[str, Any] = mask[:, step, :, :mask_start] if mask_end > 0: snake_case_ : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance snake_case_ : str = 1 / self.vqvae.config.scaling_factor * images snake_case_ : Dict = self.vqvae.decode(lowercase__ )["""sample"""] snake_case_ : Dict = (images / 2 + 0.5).clamp(0 , 1 ) snake_case_ : int = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() snake_case_ : Optional[int] = (images * 2_55).round().astype("""uint8""" ) snake_case_ : Dict = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowercase__ , mode="""RGB""" ).convert("""L""" ) for _ in images) ) snake_case_ : Optional[int] = [self.mel.image_to_audio(lowercase__ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowercase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase__ ) ) @torch.no_grad() def __UpperCamelCase (self , lowercase__ , lowercase__ = 50 ): assert isinstance(self.scheduler , lowercase__ ) self.scheduler.set_timesteps(lowercase__ ) snake_case_ : Tuple = np.array( [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) snake_case_ : Union[str, Any] = (sample / 2_55) * 2 - 1 snake_case_ : List[str] = torch.Tensor(lowercase__ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): snake_case_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps snake_case_ : Optional[int] = self.scheduler.alphas_cumprod[t] snake_case_ : Any = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) snake_case_ : str = 1 - alpha_prod_t snake_case_ : List[Any] = self.unet(lowercase__ , lowercase__ )["""sample"""] snake_case_ : Tuple = (1 - alpha_prod_t_prev) ** 0.5 * model_output snake_case_ : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) snake_case_ : List[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __UpperCamelCase (lowercase__ , lowercase__ , lowercase__ ): snake_case_ : List[str] = acos(torch.dot(torch.flatten(lowercase__ ) , torch.flatten(lowercase__ ) ) / torch.norm(lowercase__ ) / torch.norm(lowercase__ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowercase__ ) + sin(alpha * theta ) * xa / sin(lowercase__ )
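# A standalone sketch of the spherical interpolation (slerp) implemented by
# the static method above, with the two endpoints kept distinct (the mangled
# version above reuses one tensor for both terms):
from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened tensors on the unit sphere
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


a, b = torch.randn(4), torch.randn(4)
halfway = slerp(a, b, 0.5)  # point halfway along the great circle between a and b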
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) while cur > 1: # Find the maximum number in arr snake_case_ : Union[str, Any] = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi snake_case_ : int = arr[mi::-1] + arr[mi + 1 : len(SCREAMING_SNAKE_CASE__ )] # Reverse whole list snake_case_ : Dict = arr[cur - 1 :: -1] + arr[cur : len(SCREAMING_SNAKE_CASE__ )] cur -= 1 return arr if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(pancake_sort(unsorted))
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : List[str] = """roc_bert""" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=True , lowercase__=0 , lowercase__="absolute" , lowercase__=None , lowercase__=True , lowercase__=True , lowercase__=7_68 , lowercase__=9_10 , lowercase__=5_12 , lowercase__=2_48_58 , lowercase__=True , **lowercase__ , ): snake_case_ : Optional[Any] = vocab_size snake_case_ : List[Any] = max_position_embeddings snake_case_ : str = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : int = attention_probs_dropout_prob snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = type_vocab_size snake_case_ : List[Any] = layer_norm_eps snake_case_ : str = use_cache snake_case_ : Any = enable_pronunciation snake_case_ : int = enable_shape snake_case_ : Any = pronunciation_embed_dim snake_case_ : int = pronunciation_vocab_size snake_case_ : Dict = shape_embed_dim snake_case_ : Union[str, Any] = shape_vocab_size snake_case_ : List[Any] = concat_input snake_case_ : List[Any] = position_embedding_type snake_case_ : int = classifier_dropout super().__init__(pad_token_id=lowercase__ , **lowercase__ )
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('''fixtures/test_sentencepiece.model''') a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} a_ = '''>>zh<<''' a_ = '''Helsinki-NLP/''' if is_torch_available(): a_ = '''pt''' elif is_tf_available(): a_ = '''tf''' else: a_ = '''jax''' @require_sentencepiece class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = MarianTokenizer _A : List[str] = False _A : List[str] = True def __UpperCamelCase (self ): super().setUp() snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) snake_case_ : Any = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase (self , **lowercase__ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return ( "This is a test", "This is a test", ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """</s>""" snake_case_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def __UpperCamelCase (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) snake_case_ : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : List[str] = tok( ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.get_tokenizer() snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __UpperCamelCase (self ): # fmt: off snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) snake_case_ : Dict = """Tämä on testi""" snake_case_ : List[Any] = """This is a test""" snake_case_ : Optional[int] = [76, 7, 20_47, 2] snake_case_ : List[str] = [69, 12, 11, 9_40, 2] snake_case_ : Any = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil a_ = 100 a_ = set(range(3, NUM_PRIMES, 2)) primes.add(2) a_ = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_0_0 ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} snake_case_ : set[int] = set() snake_case_ : int snake_case_ : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 5_0_0_0 ): """simple docstring""" for number_to_partition in range(1 , SCREAMING_SNAKE_CASE__ ): if len(partition(SCREAMING_SNAKE_CASE__ ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _A : ClassVar[Features] = Features({"""audio""": Audio()}) _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")}) _A : str = "audio" _A : str = "transcription" def __UpperCamelCase (self , lowercase__ ): if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase__ ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) snake_case_ : Optional[int] = copy.deepcopy(self ) snake_case_ : Tuple = self.input_schema.copy() snake_case_ : List[str] = features[self.audio_column] snake_case_ : Any = input_schema return task_template @property def __UpperCamelCase (self ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
"""simple docstring""" from numpy import exp, pi, sqrt def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ): """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : int = ["""pixel_values"""] def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24} snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[str] = size snake_case_ : str = crop_pct snake_case_ : str = resample snake_case_ : Optional[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : int = do_rescale snake_case_ : Optional[int] = rescale_factor snake_case_ : str = do_normalize snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ): snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: snake_case_ : Dict = int(size["""height"""] / crop_pct ) else: snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) else: if "shortest_edge" in size: snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ ) elif "height" in size and "width" in size: snake_case_ : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): snake_case_ : int = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ): snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct snake_case_ : List[Any] = resample if resample is not None else self.resample snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : int = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : List[str] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] snake_case_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
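# Hedged arithmetic sketch of the crop_pct rule in the resize method above:
# the image is first resized so its short side is size / crop_pct, then
# center-cropped back down, reproducing the common "resize larger, then
# crop" evaluation transform.
shortest_edge, crop_pct = 224, 0.9
resize_target = int(shortest_edge / crop_pct)
assert resize_target == 248  # resize short side to 248, then crop 224x224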
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : bytes ): """simple docstring""" return "".join([hex(SCREAMING_SNAKE_CASE__ )[2:].zfill(2 ).upper() for byte in list(SCREAMING_SNAKE_CASE__ )] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" if (len(SCREAMING_SNAKE_CASE__ ) % 2) != 0: raise ValueError( """Base16 encoded data is invalid: Data does not have an even number of hex digits.""" ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(SCREAMING_SNAKE_CASE__ ) <= set("""0123456789ABCDEF""" ): raise ValueError( """Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.""" ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : str = ["""input_ids""", """attention_mask"""] _A : Tuple = MBartTokenizer _A : List[int] = [] _A : List[int] = [] def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , ) snake_case_ : Dict = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) snake_case_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX""" snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase (self ): return self._src_lang @src_lang.setter def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) snake_case_ : int = src_lang snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ): snake_case_ : List[str] = src_lang snake_case_ : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase (self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Tuple = [] snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return snake_case_ : List[str] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a_ = ['''gpt2'''] a_ = '''gpt2''' if is_tf_available(): class __lowercase ( tf.Module): """simple docstring""" def __init__(self , lowercase__ ): super().__init__() snake_case_ : Any = tokenizer snake_case_ : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) snake_case_ : Union[str, Any] = TFGPTaLMHeadModel.from_config(lowercase__ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : str = self.tokenizer(lowercase__ ) snake_case_ : str = tokenized["""input_ids"""].to_tensor() snake_case_ : Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) snake_case_ : str = self.model(input_ids=lowercase__ , attention_mask=lowercase__ )["""logits"""] return outputs @require_tf @require_keras_nlp class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): super().setUp() snake_case_ : List[Any] = [GPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] snake_case_ : Optional[Any] = [TFGPTaTokenizer.from_pretrained(lowercase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) snake_case_ : Any = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] snake_case_ : Union[str, Any] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __UpperCamelCase (self ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: snake_case_ : List[str] = tokenizer([test_inputs] , return_tensors="""tf""" ) snake_case_ : Tuple = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors snake_case_ : Dict = python_outputs[key].numpy() snake_case_ : Dict = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowercase__ , tf.intaa ) == tf_outputs_values ) ) @slow def __UpperCamelCase (self ): for tf_tokenizer in self.tf_tokenizers: snake_case_ : int = tf.function(lowercase__ ) for test_inputs in self.test_sentences: snake_case_ : int = tf.constant(lowercase__ ) snake_case_ : str = compiled_tokenizer(lowercase__ ) snake_case_ : Dict = tf_tokenizer(lowercase__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __UpperCamelCase (self ): for tf_tokenizer in self.tf_tokenizers: snake_case_ : Optional[int] = ModelToSave(tokenizer=lowercase__ ) snake_case_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) snake_case_ : Tuple = 
model.serving(lowercase__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: snake_case_ : int = Path(lowercase__ ) / """saved.model""" tf.saved_model.save(lowercase__ , lowercase__ , signatures={"""serving_default""": model.serving} ) snake_case_ : Tuple = tf.saved_model.load(lowercase__ ) snake_case_ : List[Any] = loaded_model.signatures["""serving_default"""](lowercase__ )["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def __UpperCamelCase (self ): for tf_tokenizer in self.tf_tokenizers: snake_case_ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] ) snake_case_ : List[Any] = tf_tokenizer(lowercase__ ) # Build model with some sample inputs snake_case_ : Any = tf_tokenizer.get_config() snake_case_ : List[Any] = TFGPTaTokenizer.from_config(lowercase__ ) snake_case_ : List[str] = model_from_config(lowercase__ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def __UpperCamelCase (self ): for tf_tokenizer in self.tf_tokenizers: # for the test to run snake_case_ : Tuple = 12_31_23 for max_length in [3, 5, 10_24]: snake_case_ : Tuple = tf.convert_to_tensor([self.test_sentences[0]] ) snake_case_ : List[Any] = tf_tokenizer(lowercase__ , max_length=lowercase__ ) snake_case_ : Union[str, Any] = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
48
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class __lowercase : """simple docstring""" def __init__(self , lowercase__ ): snake_case_ : Union[str, Any] = data snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def __UpperCamelCase (lowercase__ , lowercase__ ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def __UpperCamelCase (self ): snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def __UpperCamelCase (self ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64 for i in range(16 , 80 ): snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __UpperCamelCase (self ): snake_case_ : List[Any] = self.padding() snake_case_ : Any = self.split_blocks() for block in self.blocks: snake_case_ : Any = self.expand_block(lowercase__ ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h for i in range(0 , 80 ): if 0 <= i < 20: snake_case_ : Optional[Any] = (b & c) | ((~b) & d) snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: snake_case_ : Union[str, Any] = b ^ c ^ d snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: snake_case_ : str = (b & c) | (b & d) | (c & d) snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: snake_case_ : Tuple = b ^ c ^ d snake_case_ : str = 0Xc_a_6_2_c_1_d_6 snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = ( self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(lowercase__ , 30 ), c, d, ) snake_case_ : Any = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = b"""Test String""" assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324 def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) snake_case_ : Optional[int] = parser.parse_args() snake_case_ : Optional[int] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: snake_case_ : List[str] = f.read() else: snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" ) print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
48
1
"""simple docstring""" import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap a_ = '''Usage of script: script_name <size_of_canvas:int>''' a_ = [0] * 100 + [1] * 10 random.shuffle(choice) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : int = [[False for i in range(SCREAMING_SNAKE_CASE__ )] for j in range(SCREAMING_SNAKE_CASE__ )] return canvas def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[bool]] ): """simple docstring""" for i, row in enumerate(SCREAMING_SNAKE_CASE__ ): for j, _ in enumerate(SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = bool(random.getrandbits(1 ) ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[bool]] ): """simple docstring""" snake_case_ : int = np.array(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(SCREAMING_SNAKE_CASE__ ): for c, pt in enumerate(SCREAMING_SNAKE_CASE__ ): snake_case_ : Dict = __judge_point( SCREAMING_SNAKE_CASE__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) snake_case_ : Dict = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. snake_case_ : list[list[bool]] = current_canvas.tolist() return return_canvas def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : bool , SCREAMING_SNAKE_CASE__ : list[list[bool]] ): """simple docstring""" snake_case_ : List[str] = 0 snake_case_ : List[Any] = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. snake_case_ : Tuple = pt if pt: if alive < 2: snake_case_ : Optional[int] = False elif alive == 2 or alive == 3: snake_case_ : Optional[Any] = True elif alive > 3: snake_case_ : Dict = False else: if alive == 3: snake_case_ : Dict = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) a_ = int(sys.argv[1]) # main working structure of this module. a_ = create_canvas(canvas_size) seed(c) a_ , a_ = plt.subplots() fig.show() a_ = ListedColormap(['''w''', '''k''']) try: while True: a_ = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
48
"""simple docstring""" from manim import * class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : str = [mem.copy() for i in range(6 )] snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[Any] = Text("""CPU""" , font_size=24 ) snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase__ ) snake_case_ : List[Any] = [mem.copy() for i in range(4 )] snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = Text("""GPU""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase__ ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Dict = Text("""Model""" , font_size=24 ) snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) model.move_to([3, -1.0, 0] ) self.add(lowercase__ ) snake_case_ : Dict = [] for i, rect in enumerate(lowercase__ ): rect.set_stroke(lowercase__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 ) self.add(lowercase__ ) cpu_targs.append(lowercase__ ) snake_case_ : List[str] = [mem.copy() for i in range(6 )] snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) snake_case_ : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case_ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase__ , lowercase__ ) snake_case_ : List[Any] = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) snake_case_ : List[Any] = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase__ ) , Write(lowercase__ ) ) self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) ) snake_case_ : Optional[int] = [] snake_case_ : List[str] = [] for i, rect in enumerate(lowercase__ ): snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 ) target.move_to(lowercase__ ) first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) ) snake_case_ : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) ) self.play(*lowercase__ ) self.play(*lowercase__ ) self.wait()
48
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = 0 if start < end: snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = a[end] snake_case_ : Dict = a[pivot] snake_case_ : Any = temp snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ ) return count def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Tuple = 0 snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = a[end] snake_case_ : List[Any] = a[pivot] snake_case_ : Optional[Any] = temp snake_case_ : List[str] = start - 1 for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value snake_case_ : Any = new_pivot_index + 1 snake_case_ : Tuple = a[new_pivot_index] snake_case_ : Optional[int] = a[index] snake_case_ : Tuple = temp snake_case_ : Union[str, Any] = a[new_pivot_index + 1] snake_case_ : Union[str, Any] = a[end] snake_case_ : Union[str, Any] = temp return new_pivot_index + 1, count a_ = TemporaryFile() a_ = 100 # 1000 elements are to be sorted a_ , a_ = 0, 1 # mean and standard deviation a_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a_ = np.load(outfile) a_ = len(M) - 1 a_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
48
1
"""simple docstring""" import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __lowercase ( unittest.TestCase): """simple docstring""" _A : List[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING _A : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = AudioClassificationPipeline(model=lowercase__ , feature_extractor=lowercase__ ) # test with a raw waveform snake_case_ : Union[str, Any] = np.zeros((3_40_00,) ) snake_case_ : List[Any] = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ , snake_case_ : int = examples snake_case_ : Dict = audio_classifier(lowercase__ ) # by default a model is initialized with num_labels=2 self.assertEqual( lowercase__ , [ {"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )}, {"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )}, ] , ) snake_case_ : List[str] = audio_classifier(lowercase__ , top_k=1 ) self.assertEqual( lowercase__ , [ {"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )}, ] , ) self.run_torchaudio(lowercase__ ) @require_torchaudio def __UpperCamelCase (self , lowercase__ ): import datasets # test with a local file snake_case_ : Optional[int] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) snake_case_ : Union[str, Any] = dataset[0]["""audio"""]["""array"""] snake_case_ : Union[str, Any] = audio_classifier(lowercase__ ) self.assertEqual( lowercase__ , [ {"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )}, {"""score""": ANY(lowercase__ ), """label""": ANY(lowercase__ )}, ] , ) @require_torch def __UpperCamelCase (self ): snake_case_ : List[str] = """anton-l/wav2vec2-random-tiny-classifier""" snake_case_ : List[str] = pipeline("""audio-classification""" , model=lowercase__ ) snake_case_ : Dict = np.ones((80_00,) ) snake_case_ : Tuple = audio_classifier(lowercase__ , top_k=4 ) snake_case_ : Dict = [ {"""score""": 0.0842, """label""": """no"""}, {"""score""": 0.0838, """label""": """up"""}, {"""score""": 0.0837, """label""": """go"""}, {"""score""": 0.0834, """label""": """right"""}, ] snake_case_ : str = [ {"""score""": 0.0845, """label""": """stop"""}, {"""score""": 0.0844, """label""": """on"""}, {"""score""": 0.0841, """label""": """right"""}, {"""score""": 0.0834, """label""": """left"""}, ] self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) snake_case_ : Union[str, Any] = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} snake_case_ : Tuple = audio_classifier(lowercase__ , top_k=4 ) self.assertIn(nested_simplify(lowercase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __UpperCamelCase (self ): import datasets snake_case_ : Union[str, Any] = """superb/wav2vec2-base-superb-ks""" snake_case_ : List[Any] = pipeline("""audio-classification""" , model=lowercase__ ) snake_case_ : Optional[int] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) 
snake_case_ : str = np.array(dataset[3]["""speech"""] , dtype=np.floataa ) snake_case_ : Dict = audio_classifier(lowercase__ , top_k=4 ) self.assertEqual( nested_simplify(lowercase__ , decimals=3 ) , [ {"""score""": 0.981, """label""": """go"""}, {"""score""": 0.007, """label""": """up"""}, {"""score""": 0.006, """label""": """_unknown_"""}, {"""score""": 0.001, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def __UpperCamelCase (self ): pass
48
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ): """simple docstring""" snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(SCREAMING_SNAKE_CASE__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ): if random.random() < probability: graph[i].append(SCREAMING_SNAKE_CASE__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(SCREAMING_SNAKE_CASE__ ) return graph def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return { i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ ) } if __name__ == "__main__": import doctest doctest.testmod()
48
1
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. a_ = abspath(join(dirname(dirname(dirname(__file__))), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main snake_case_ : List[Any] = terminalreporter.config.getoption("""--make-reports""" ) if make_reports: pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
48
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """dpr""" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ): super().__init__(pad_token_id=lowercase__ , **lowercase__ ) snake_case_ : List[Any] = vocab_size snake_case_ : List[str] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : int = hidden_act snake_case_ : Dict = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Union[str, Any] = projection_dim snake_case_ : str = position_embedding_type
48
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Union[str, Any] = [] snake_case_ : str = set({"""(""", """[""", """{"""} ) snake_case_ : List[str] = set({""")""", """]""", """}"""} ) snake_case_ : Union[str, Any] = {"""{""": """}""", """[""": """]""", """(""": """)"""} for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(SCREAMING_SNAKE_CASE__ ) == 0 or (len(SCREAMING_SNAKE_CASE__ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(SCREAMING_SNAKE_CASE__ ) == 0 def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Optional[Any] = input("""Enter sequence of brackets: """ ) if is_balanced(SCREAMING_SNAKE_CASE__ ): print(SCREAMING_SNAKE_CASE__ , """is balanced""" ) else: print(SCREAMING_SNAKE_CASE__ , """is not balanced""" ) if __name__ == "__main__": main()
48
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm a_ = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex a_ = 10 a_ = 256 def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS: return None snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ ) for token in set(SCREAMING_SNAKE_CASE__ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0} class __lowercase : """simple docstring""" def __init__(self , *, lowercase__ = 0.85 , ): snake_case_ : Tuple = duplication_jaccard_threshold snake_case_ : Optional[Any] = NUM_PERM snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) snake_case_ : List[Any] = defaultdict(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : int = self._index.query(lowercase__ ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(lowercase__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : str = [] for base, duplicates in self._duplicate_clusters.items(): snake_case_ : Optional[Any] = [base] + list(lowercase__ ) # reformat the cluster to be a list of dict snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(lowercase__ ) return duplicate_clusters def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.get_duplicate_clusters() with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ , snake_case_ : str = element snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ): """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ): """simple docstring""" snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ): di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) a_ = None def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = [] for elementa in cluster: snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: snake_case_ : Union[str, Any] = 1 extremes.append(SCREAMING_SNAKE_CASE__ ) return extremes def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" global _shared_dataset snake_case_ : str = dataset snake_case_ : int = [] snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ): extremes_list.append(SCREAMING_SNAKE_CASE__ ) return extremes_list def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ): """simple docstring""" snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} snake_case_ : str = {} snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for extremes in extremes_clusters: for element in extremes: snake_case_ : int = element snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() ) snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: snake_case_ : List[Any] = element["""base_index"""] in extreme_dict if element["is_extreme"]: snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""] print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) return ds_filter, duplicate_clusters
48
1
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__ ): super().__init__() snake_case_ : List[Any] = nn.ModuleList(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = True , ): for i, (image, scale, controlnet) in enumerate(zip(lowercase__ , lowercase__ , self.nets ) ): snake_case_ , snake_case_ : str = controlnet( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) # merge samples if i == 0: snake_case_ , snake_case_ : Tuple = down_samples, mid_sample else: snake_case_ : int = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(lowercase__ , lowercase__ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __UpperCamelCase (self , lowercase__ , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , ): snake_case_ : Optional[int] = 0 snake_case_ : List[str] = save_directory for controlnet in self.nets: controlnet.save_pretrained( lowercase__ , is_main_process=lowercase__ , save_function=lowercase__ , safe_serialization=lowercase__ , variant=lowercase__ , ) idx += 1 snake_case_ : Any = model_path_to_save + f'_{idx}' @classmethod def __UpperCamelCase (cls , lowercase__ , **lowercase__ ): snake_case_ : Any = 0 snake_case_ : Union[str, Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... snake_case_ : int = pretrained_model_path while os.path.isdir(lowercase__ ): snake_case_ : Any = ControlNetModel.from_pretrained(lowercase__ , **lowercase__ ) controlnets.append(lowercase__ ) idx += 1 snake_case_ : Dict = pretrained_model_path + f'_{idx}' logger.info(f'{len(lowercase__ )} controlnets loaded from {pretrained_model_path}.' ) if len(lowercase__ ) == 0: raise ValueError( f'No ControlNets found under {os.path.dirname(lowercase__ )}. Expected at least {pretrained_model_path + "_0"}.' ) return cls(lowercase__ )
48
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) a_ = logging.getLogger(__name__) if __name__ == "__main__": a_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=30522, type=int) a_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: a_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') a_ = Counter() for tk_ids in data: counter.update(tk_ids) a_ = [0] * args.vocab_size for k, v in counter.items(): a_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
48
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : Tuple = StableUnCLIPImgaImgPipeline _A : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS _A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _A : str = frozenset( []) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess _A : int = frozenset([]) def __UpperCamelCase (self ): snake_case_ : Tuple = 32 snake_case_ : Union[str, Any] = embedder_hidden_size # image encoding components snake_case_ : int = CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) snake_case_ : Any = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowercase__ , projection_dim=lowercase__ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) snake_case_ : Union[str, Any] = StableUnCLIPImageNormalizer(embedding_dim=lowercase__ ) snake_case_ : Dict = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) snake_case_ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) snake_case_ : Dict = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) ) torch.manual_seed(0 ) snake_case_ : Optional[Any] = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , ) torch.manual_seed(0 ) snake_case_ : Tuple = DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=lowercase__ , steps_offset=1 , ) torch.manual_seed(0 ) snake_case_ : int = AutoencoderKL() snake_case_ : str = { # image encoding components """feature_extractor""": feature_extractor, """image_encoder""": image_encoder.eval(), # image noising components """image_normalizer""": image_normalizer.eval(), 
"""image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder.eval(), """unet""": unet.eval(), """scheduler""": scheduler, """vae""": vae.eval(), } return components def __UpperCamelCase (self , lowercase__ , lowercase__=0 , lowercase__=True ): if str(lowercase__ ).startswith("""mps""" ): snake_case_ : List[Any] = torch.manual_seed(lowercase__ ) else: snake_case_ : Dict = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) snake_case_ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase__ ) ).to(lowercase__ ) if pil_image: snake_case_ : Tuple = input_image * 0.5 + 0.5 snake_case_ : Optional[int] = input_image.clamp(0 , 1 ) snake_case_ : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() snake_case_ : Tuple = DiffusionPipeline.numpy_to_pil(lowercase__ )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Union[str, Any] = self.get_dummy_components() snake_case_ : str = StableUnCLIPImgaImgPipeline(**lowercase__ ) snake_case_ : Dict = sd_pipe.to(lowercase__ ) sd_pipe.set_progress_bar_config(disable=lowercase__ ) snake_case_ : int = self.get_dummy_inputs(lowercase__ ) inputs.update({"""image_embeds""": None} ) snake_case_ : int = sd_pipe(**lowercase__ ).images snake_case_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __UpperCamelCase (self ): snake_case_ : int = torch_device in ["""cpu""", """mps"""] self._test_attention_slicing_forward_pass(test_max_difference=lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Dict = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=lowercase__ ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __UpperCamelCase (self ): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase__ ) @slow @require_torch_gpu class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase (self ): snake_case_ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) snake_case_ : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" ) snake_case_ : str = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) snake_case_ : List[str] = pipe(lowercase__ , """anime turle""" , 
generator=lowercase__ , output_type="""np""" ) snake_case_ : int = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) snake_case_ : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" ) snake_case_ : Dict = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() snake_case_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) snake_case_ : Dict = pipe(lowercase__ , """anime turle""" , generator=lowercase__ , output_type="""np""" ) snake_case_ : Dict = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(lowercase__ , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) snake_case_ : List[str] = pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() snake_case_ : str = pipe( lowercase__ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , ) snake_case_ : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
48
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Optional[Any] = tmp_path / """cache""" snake_case_ : Optional[int] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : int = {"""text""": """string"""} snake_case_ : Any = features.copy() if features else default_expected_features snake_case_ : List[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : Union[str, Any] = tmp_path / """cache""" snake_case_ : Optional[Any] = {"""text""": """string"""} snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = text_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : str = [text_path] snake_case_ : List[str] = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , 
SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: snake_case_ : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : int = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : int = features.copy() if features else default_expected_features snake_case_ : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if split: snake_case_ : Union[str, Any] = {split: text_path} else: snake_case_ : Union[str, Any] = """train""" snake_case_ : int = {"""train""": text_path, """test""": text_path} snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : Tuple = {"""text""": """string"""} snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
48
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
48
"""simple docstring""" from copy import deepcopy class __lowercase : """simple docstring""" def __init__(self , lowercase__ = None , lowercase__ = None ): if arr is None and size is not None: snake_case_ : str = size snake_case_ : Optional[Any] = [0] * size elif arr is not None: self.init(lowercase__ ) else: raise ValueError("""Either arr or size must be specified""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[Any] = len(lowercase__ ) snake_case_ : int = deepcopy(lowercase__ ) for i in range(1 , self.size ): snake_case_ : Optional[Any] = self.next_(lowercase__ ) if j < self.size: self.tree[j] += self.tree[i] def __UpperCamelCase (self ): snake_case_ : Dict = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case_ : Optional[int] = self.next_(lowercase__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def __UpperCamelCase (lowercase__ ): return index + (index & (-index)) @staticmethod def __UpperCamelCase (lowercase__ ): return index - (index & (-index)) def __UpperCamelCase (self , lowercase__ , lowercase__ ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case_ : Tuple = self.next_(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): self.add(lowercase__ , value - self.get(lowercase__ ) ) def __UpperCamelCase (self , lowercase__ ): if right == 0: return 0 snake_case_ : List[str] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case_ : Optional[int] = self.prev(lowercase__ ) return result def __UpperCamelCase (self , lowercase__ , lowercase__ ): return self.prefix(lowercase__ ) - self.prefix(lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return self.query(lowercase__ , index + 1 ) def __UpperCamelCase (self , lowercase__ ): value -= self.tree[0] if value < 0: return -1 snake_case_ : Tuple = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case_ : Tuple = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
48
1
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" if ( (cp >= 0x4e00 and cp <= 0x9fff) or (cp >= 0x3400 and cp <= 0x4dbf) # or (cp >= 0x20000 and cp <= 0x2a6df) # or (cp >= 0x2a700 and cp <= 0x2b73f) # or (cp >= 0x2b740 and cp <= 0x2b81f) # or (cp >= 0x2b820 and cp <= 0x2ceaf) # or (cp >= 0xf900 and cp <= 0xfaff) or (cp >= 0x2f800 and cp <= 0x2fa1f) # ): # return True return False def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" for char in word: snake_case_ : int = ord(SCREAMING_SNAKE_CASE__ ) if not _is_chinese_char(SCREAMING_SNAKE_CASE__ ): return 0 return 1 def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Any = set() for token in tokens: snake_case_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) > 1 and is_chinese(SCREAMING_SNAKE_CASE__ ) if chinese_word: word_set.add(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = list(SCREAMING_SNAKE_CASE__ ) return word_list def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : set() ): """simple docstring""" if not chinese_word_set: return bert_tokens snake_case_ : Dict = max([len(SCREAMING_SNAKE_CASE__ ) for w in chinese_word_set] ) snake_case_ : Optional[Any] = bert_tokens snake_case_ , snake_case_ : int = 0, len(SCREAMING_SNAKE_CASE__ ) while start < end: snake_case_ : Union[str, Any] = True if is_chinese(bert_word[start] ): snake_case_ : Dict = min(end - start , SCREAMING_SNAKE_CASE__ ) for i in range(SCREAMING_SNAKE_CASE__ , 1 , -1 ): snake_case_ : str = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): snake_case_ : List[str] = """##""" + bert_word[j] snake_case_ : Tuple = start + i snake_case_ : str = False break if single_word: start += 1 return bert_word def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : LTP , SCREAMING_SNAKE_CASE__ : BertTokenizer ): """simple docstring""" snake_case_ : Optional[Any] = [] for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 1_0_0 ): snake_case_ : Union[str, Any] = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws snake_case_ : List[Any] = [get_chinese_word(SCREAMING_SNAKE_CASE__ ) for r in res] ltp_res.extend(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = [] for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 1_0_0 ): snake_case_ : Union[str, Any] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=5_1_2 ) bert_res.extend(res["""input_ids"""] ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = [] for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : Optional[Any] = [] for id in input_ids: snake_case_ : Optional[int] = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) input_tokens.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[int] = add_sub_symbol(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ): if token[:2] == "##": snake_case_ : str = token[2:] # save chinese tokens' pos if len(SCREAMING_SNAKE_CASE__ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE__ ) ): ref_id.append(SCREAMING_SNAKE_CASE__ ) ref_ids.append(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) return ref_ids def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: snake_case_ : Optional[Any] = f.readlines() snake_case_ : Optional[Any] = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' snake_case_ : Tuple = LTP(args.ltp ) # faster in GPU device snake_case_ : Optional[Any] = BertTokenizer.from_pretrained(args.bert ) snake_case_ : Optional[int] = prepare_ref(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: snake_case_ : Union[str, Any] = [json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" for ref in ref_ids] f.writelines(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', required=False, type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', required=False, type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''', ) parser.add_argument( '''--bert''', required=False, type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''', ) parser.add_argument( '''--save_path''', required=False, type=str, default='''./resources/ref.txt''', help='''path to save res''', ) a_ = parser.parse_args() main(args)
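The core of the script is add_sub_symbol, which re-marks single-character BERT tokens with "##" whenever LTP's segmentation says they continue a multi-character word; whole-word masking then masks those positions as one unit. A minimal sketch with hand-made stand-in tokens (no LTP model or BERT checkpoint needed):

# Hypothetical inputs: BERT emits one token per Chinese character; the word
# set stands in for what LTP's segmentation would produce for this sentence.
bert_tokens = ["[CLS]", "我", "喜", "欢", "北", "京", "[SEP]"]
chinese_words = {"喜欢", "北京"}
print(add_sub_symbol(bert_tokens, chinese_words))
# -> ['[CLS]', '我', '喜', '##欢', '北', '##京', '[SEP]']
# prepare_ref() would record positions 3 and 5 for this line.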
48
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
48
1
"""simple docstring""" import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : Union[str, Any] = AutoencoderKL _A : Optional[int] = """sample""" _A : Optional[int] = 1e-2 @property def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = 4 snake_case_ : Tuple = 3 snake_case_ : Any = (32, 32) snake_case_ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowercase__ ) return {"sample": image} @property def __UpperCamelCase (self ): return (3, 32, 32) @property def __UpperCamelCase (self ): return (3, 32, 32) def __UpperCamelCase (self ): snake_case_ : List[str] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } snake_case_ : Optional[int] = self.dummy_input return init_dict, inputs_dict def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): pass @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" ) def __UpperCamelCase (self ): # enable deterministic behavior for gradient checkpointing snake_case_ , snake_case_ : Optional[int] = self.prepare_init_args_and_inputs_for_common() snake_case_ : Optional[Any] = self.model_class(**lowercase__ ) model.to(lowercase__ ) assert not model.is_gradient_checkpointing and model.training snake_case_ : Optional[Any] = model(**lowercase__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() snake_case_ : Any = torch.randn_like(lowercase__ ) snake_case_ : Dict = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing snake_case_ : Dict = self.model_class(**lowercase__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowercase__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training snake_case_ : List[Any] = model_a(**lowercase__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() snake_case_ : Any = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) snake_case_ : Any = dict(model.named_parameters() ) snake_case_ : List[Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : Optional[int] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(lowercase__ ) snake_case_ : List[Any] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __UpperCamelCase (self ): snake_case_ : Tuple = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) snake_case_ : Any = model.to(lowercase__ ) model.eval() if torch_device == "mps": snake_case_ : List[str] = torch.manual_seed(0 ) else: snake_case_ : List[Any] = torch.Generator(device=lowercase__ ).manual_seed(0 ) snake_case_ : List[str] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) snake_case_ : str = image.to(lowercase__ ) with torch.no_grad(): snake_case_ : List[Any] = model(lowercase__ , sample_posterior=lowercase__ , generator=lowercase__ ).sample snake_case_ : Any = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": snake_case_ : str = torch.tensor( [ -4.0_078e-01, -3.8_323e-04, -1.2_681e-01, -1.1_462e-01, 2.0_095e-01, 1.0_893e-01, -8.8_247e-02, -3.0_361e-01, -9.8_644e-03, ] ) elif torch_device == "cpu": snake_case_ : int = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: snake_case_ : Optional[Any] = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowercase__ , lowercase__ , rtol=1e-2 ) ) @slow class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): return f'gaussian_noise_s={seed}_shape={"_".join([str(lowercase__ ) for s in shape] )}.npy' def __UpperCamelCase (self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase (self , lowercase__=0 , lowercase__=(4, 3, 5_12, 5_12) , lowercase__=False ): snake_case_ : int = torch.floataa if fpaa else torch.floataa snake_case_ : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(lowercase__ , lowercase__ ) ) ).to(lowercase__ ).to(lowercase__ ) return image def __UpperCamelCase (self , lowercase__="CompVis/stable-diffusion-v1-4" , lowercase__=False ): snake_case_ : str = """fp16""" if fpaa else None snake_case_ : Dict = torch.floataa if fpaa else torch.floataa snake_case_ : Optional[Any] = AutoencoderKL.from_pretrained( lowercase__ , subfolder="""vae""" , torch_dtype=lowercase__ , revision=lowercase__ , ) model.to(lowercase__ ).eval() return model def __UpperCamelCase (self , lowercase__=0 ): if torch_device == "mps": return torch.manual_seed(lowercase__ ) return torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[int] = self.get_sd_vae_model() snake_case_ : int = self.get_sd_image(lowercase__ ) snake_case_ : Optional[Any] = self.get_generator(lowercase__ ) with torch.no_grad(): snake_case_ : Any = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample assert sample.shape == image.shape snake_case_ : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu() snake_case_ : Dict = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : Dict = self.get_sd_vae_model(fpaa=lowercase__ ) snake_case_ : List[str] = self.get_sd_image(lowercase__ , fpaa=lowercase__ ) snake_case_ : Dict = self.get_generator(lowercase__ ) with torch.no_grad(): snake_case_ : Optional[Any] = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample assert sample.shape == image.shape snake_case_ : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu() snake_case_ : Tuple = torch.tensor(lowercase__ ) assert torch_all_close(lowercase__ 
, lowercase__ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[int] = self.get_sd_vae_model() snake_case_ : Tuple = self.get_sd_image(lowercase__ ) with torch.no_grad(): snake_case_ : Dict = model(lowercase__ ).sample assert sample.shape == image.shape snake_case_ : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() snake_case_ : int = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : List[Any] = self.get_sd_vae_model() snake_case_ : Union[str, Any] = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): snake_case_ : Dict = model.decode(lowercase__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] snake_case_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu() snake_case_ : List[Any] = torch.tensor(lowercase__ ) assert torch_all_close(lowercase__ , lowercase__ , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowercase__ ) snake_case_ : Union[str, Any] = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ ) with torch.no_grad(): snake_case_ : List[Any] = model.decode(lowercase__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] snake_case_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() snake_case_ : Dict = torch.tensor(lowercase__ ) assert torch_all_close(lowercase__ , lowercase__ , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Union[str, Any] = self.get_sd_vae_model(fpaa=lowercase__ ) snake_case_ : Optional[int] = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ ) with torch.no_grad(): snake_case_ : Any = model.decode(lowercase__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): snake_case_ : List[Any] = model.decode(lowercase__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(lowercase__ , lowercase__ , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.get_sd_vae_model() snake_case_ : Any = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): snake_case_ : List[Any] = model.decode(lowercase__ ).sample 
model.enable_xformers_memory_efficient_attention() with torch.no_grad(): snake_case_ : Optional[Any] = model.decode(lowercase__ ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : Union[str, Any] = self.get_sd_vae_model() snake_case_ : Union[str, Any] = self.get_sd_image(lowercase__ ) snake_case_ : int = self.get_generator(lowercase__ ) with torch.no_grad(): snake_case_ : int = model.encode(lowercase__ ).latent_dist snake_case_ : Dict = dist.sample(generator=lowercase__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] snake_case_ : Any = sample[0, -1, -3:, -3:].flatten().cpu() snake_case_ : int = torch.tensor(lowercase__ ) snake_case_ : Tuple = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowercase__ , lowercase__ , atol=lowercase__ )
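Outside the test harness, the model under test boils down to a simple encode/decode round trip. A minimal sketch using the public diffusers API (the checkpoint name mirrors the one used in the tests above; treat it as an assumption):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
x = torch.randn(1, 3, 512, 512)                   # stand-in image batch in [-1, 1]
with torch.no_grad():
    latents = vae.encode(x).latent_dist.sample()  # (1, 4, 64, 64): 8x spatial downsampling
    recon = vae.decode(latents).sample            # back to (1, 3, 512, 512)
print(latents.shape, recon.shape)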
48
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = ["""image_processor""", """tokenizer"""] _A : str = """ChineseCLIPImageProcessor""" _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase__ , ) snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ , lowercase__ ) snake_case_ : Union[str, Any] = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.tokenizer.model_input_names snake_case_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , ) return self.image_processor_class
48
1
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowercase : """simple docstring""" def __init__(self , lowercase__ , lowercase__=13 , lowercase__=32 , lowercase__=2 , lowercase__=3 , lowercase__=16 , lowercase__=[1, 2, 1] , lowercase__=[2, 2, 4] , lowercase__=2 , lowercase__=2.0 , lowercase__=True , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.1 , lowercase__="gelu" , lowercase__=False , lowercase__=True , lowercase__=0.02 , lowercase__=1e-5 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=10 , lowercase__=8 , ): snake_case_ : int = parent snake_case_ : Union[str, Any] = batch_size snake_case_ : List[Any] = image_size snake_case_ : int = patch_size snake_case_ : List[str] = num_channels snake_case_ : Dict = embed_dim snake_case_ : Union[str, Any] = depths snake_case_ : Any = num_heads snake_case_ : Optional[int] = window_size snake_case_ : Union[str, Any] = mlp_ratio snake_case_ : Any = qkv_bias snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Optional[int] = drop_path_rate snake_case_ : Tuple = hidden_act snake_case_ : Optional[Any] = use_absolute_embeddings snake_case_ : List[Any] = patch_norm snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Optional[Any] = initializer_range snake_case_ : str = is_training snake_case_ : Optional[int] = scope snake_case_ : Union[str, Any] = use_labels snake_case_ : int = type_sequence_label_size snake_case_ : str = encoder_stride def __UpperCamelCase (self ): snake_case_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Tuple = None if self.use_labels: snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : int = self.get_config() return config, pixel_values, labels def __UpperCamelCase (self ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Tuple = SwinvaModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() snake_case_ : Union[str, Any] = model(lowercase__ ) snake_case_ : Dict = 
((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) snake_case_ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[Any] = SwinvaForMaskedImageModeling(config=lowercase__ ) model.to(lowercase__ ) model.eval() snake_case_ : str = model(lowercase__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ : str = 1 snake_case_ : List[str] = SwinvaForMaskedImageModeling(lowercase__ ) model.to(lowercase__ ) model.eval() snake_case_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Optional[Any] = model(lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : List[Any] = self.type_sequence_label_size snake_case_ : Optional[Any] = SwinvaForImageClassification(lowercase__ ) model.to(lowercase__ ) model.eval() snake_case_ : Any = model(lowercase__ , labels=lowercase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase (self ): snake_case_ : int = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Dict = config_and_inputs snake_case_ : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) _A : Optional[Any] = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : Union[str, Any] = False _A : Optional[Any] = False _A : Tuple = False def __UpperCamelCase (self ): snake_case_ : int = SwinvaModelTester(self ) snake_case_ : Union[str, Any] = ConfigTester(self , config_class=lowercase__ , embed_dim=37 ) def __UpperCamelCase (self ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase__ ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def __UpperCamelCase (self ): pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(lowercase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : Any = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Optional[int] = model_class(lowercase__ ) snake_case_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : int = [*signature.parameters.keys()] snake_case_ : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowercase__ ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Any = True for model_class in self.all_model_classes: snake_case_ : Union[str, Any] = True snake_case_ : str = False snake_case_ : Any = True snake_case_ : Tuple = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): snake_case_ : Optional[int] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) snake_case_ : List[str] = outputs.attentions snake_case_ : List[str] = len(self.model_tester.depths ) self.assertEqual(len(lowercase__ ) , lowercase__ ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : List[Any] = True snake_case_ : str = config.window_size**2 snake_case_ : Any = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): snake_case_ : Any = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) snake_case_ : Optional[int] = outputs.attentions self.assertEqual(len(lowercase__ ) , lowercase__ ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) snake_case_ : int = len(lowercase__ ) # Check attention is always last and order is fine snake_case_ : Tuple = True snake_case_ : Any = True snake_case_ : List[Any] = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): snake_case_ : str = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): snake_case_ : str = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states snake_case_ : List[Any] = 2 self.assertEqual(out_len + added_hidden_states , len(lowercase__ ) ) snake_case_ : Any = outputs.attentions self.assertEqual(len(lowercase__ ) , lowercase__ ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Any = model_class(lowercase__ ) model.to(lowercase__ ) model.eval() with torch.no_grad(): snake_case_ : Union[str, Any] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) ) snake_case_ : List[str] = outputs.hidden_states snake_case_ : List[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowercase__ ) , lowercase__ ) # Swinv2 has a different seq_length snake_case_ : List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) snake_case_ : Union[str, Any] = outputs.reshaped_hidden_states self.assertEqual(len(lowercase__ ) , lowercase__ ) snake_case_ , snake_case_ , snake_case_ , 
snake_case_ : Union[str, Any] = reshaped_hidden_states[0].shape snake_case_ : List[str] = ( reshaped_hidden_states[0].view(lowercase__ , lowercase__ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: snake_case_ : Optional[int] = True self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : str = True self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = 3 snake_case_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) snake_case_ : List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) snake_case_ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) snake_case_ : int = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: snake_case_ : Optional[int] = True self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : List[str] = True self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , (padded_height, padded_width) ) def __UpperCamelCase (self ): snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase__ ) @slow def __UpperCamelCase (self ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Dict = SwinvaModel.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = _config_zero_init(lowercase__ ) for model_class in self.all_model_classes: snake_case_ : Union[str, Any] = model_class(config=lowercase__ ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class __lowercase ( unittest.TestCase): """simple docstring""" @cached_property def __UpperCamelCase (self ): return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def __UpperCamelCase (self ): snake_case_ : 
Optional[int] = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( lowercase__ ) snake_case_ : Any = self.default_image_processor snake_case_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) snake_case_ : int = image_processor(images=lowercase__ , return_tensors="""pt""" ).to(lowercase__ ) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**lowercase__ ) # verify the logits snake_case_ : Any = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowercase__ ) snake_case_ : str = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(lowercase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
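The integration test above maps onto a short inference recipe; a sketch using the real (non-obfuscated) transformers class names, Swinv2ForImageClassification and AutoImageProcessor:

import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

name = "microsoft/swinv2-tiny-patch4-window8-256"
processor = AutoImageProcessor.from_pretrained(name)
model = Swinv2ForImageClassification.from_pretrained(name).eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])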
48
"""simple docstring""" import argparse import copy def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case_ : int = [] _list.append([line.split()[1], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case_ : str = [] _list.append([line.split()[0], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ ) as f: snake_case_ : Optional[Any] = f.read(1 ) snake_case_ : Union[str, Any] = start_node snake_case_ : Dict = [] snake_case_ : Union[str, Any] = start_node snake_case_ : Tuple = 0 while visiting not in first_solution: snake_case_ : int = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution: snake_case_ : Union[str, Any] = k[1] snake_case_ : Any = k[0] first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = best_node first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case_ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = [] for n in solution[1:-1]: snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ ) for kn in solution[1:-1]: snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ ) if n == kn: continue snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = kn snake_case_ : Dict = n snake_case_ : Optional[int] = 0 for k in _tmp[:-1]: snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case_ : Dict = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = 1 snake_case_ : List[Any] = first_solution snake_case_ : List[Any] = [] snake_case_ : Optional[Any] = distance_of_first_solution snake_case_ : Dict = solution while count <= iters: snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = 0 snake_case_ : List[Any] = neighborhood[index_of_best_solution] snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 snake_case_ : List[str] = False while 
not found: snake_case_ : Tuple = 0 while i < len(SCREAMING_SNAKE_CASE__ ): if best_solution[i] != solution[i]: snake_case_ : Optional[Any] = best_solution[i] snake_case_ : int = solution[i] break snake_case_ : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case_ : Tuple = True snake_case_ : Dict = best_solution[:-1] snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case_ : Tuple = cost snake_case_ : Union[str, Any] = solution else: snake_case_ : str = index_of_best_solution + 1 snake_case_ : Tuple = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE__ ) >= size: tabu_list.pop(0 ) snake_case_ : List[str] = count + 1 return best_solution_ever, best_cost def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): """simple docstring""" snake_case_ : Tuple = generate_neighbours(args.File ) snake_case_ , snake_case_ : Optional[Any] = generate_first_solution( args.File , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : Dict = tabu_search( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
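The script expects a whitespace-separated edge list (node node distance, one edge per line) whose very first character names the start node. A hypothetical input and invocation (file and script names are made up for illustration):

# graph.txt
#   a b 20
#   a c 18
#   a d 22
#   b c 10
#   b d 11
#   c d 25
#
# python tabu_search.py -f graph.txt -i 100 -s 5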
48
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available a_ = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
48
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings a_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """rag""" _A : Optional[Any] = True def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" snake_case_ : List[Any] = kwargs.pop("""question_encoder""" ) snake_case_ : Tuple = question_encoder_config.pop("""model_type""" ) snake_case_ : List[str] = kwargs.pop("""generator""" ) snake_case_ : List[str] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : int = reduce_loss snake_case_ : Optional[int] = label_smoothing snake_case_ : Dict = exclude_bos_score snake_case_ : Union[str, Any] = do_marginalize snake_case_ : Union[str, Any] = title_sep snake_case_ : int = doc_sep snake_case_ : int = n_docs snake_case_ : List[str] = max_combined_length snake_case_ : Tuple = dataset snake_case_ : int = dataset_split snake_case_ : str = index_name snake_case_ : List[str] = retrieval_vector_size snake_case_ : Dict = retrieval_batch_size snake_case_ : str = passages_path snake_case_ : Union[str, Any] = index_path snake_case_ : Tuple = use_dummy_dataset snake_case_ : Dict = output_retrieved snake_case_ : str = do_deduplication snake_case_ : Any = use_cache if self.forced_eos_token_id is None: snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ ) @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.question_encoder.to_dict() snake_case_ : Dict = self.generator.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
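A RagConfig is normally composed out of two sub-configs via the classmethod above (obfuscated here as __UpperCamelCase; in transformers it is from_question_encoder_generator_configs). A short sketch:

from transformers import BartConfig, DPRConfig, RagConfig

config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig(), BartConfig(), n_docs=5, index_name="exact"
)
print(config.question_encoder.model_type, config.generator.model_type)  # dpr bart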
48
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
48
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """upernet""" def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ): super().__init__(**lowercase__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): snake_case_ : Tuple = backbone_config.get("""model_type""" ) snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[Any] = config_class.from_dict(lowercase__ ) snake_case_ : List[Any] = backbone_config snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = initializer_range snake_case_ : str = pool_scales snake_case_ : Dict = use_auxiliary_head snake_case_ : str = auxiliary_loss_weight snake_case_ : List[str] = auxiliary_in_channels snake_case_ : Optional[Any] = auxiliary_channels snake_case_ : Any = auxiliary_num_convs snake_case_ : List[Any] = auxiliary_concat_input snake_case_ : List[str] = loss_ignore_index def __UpperCamelCase (self ): snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : Any = self.__class__.model_type return output
48
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''}, '''tokenizer_file''': { '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json''' }, } a_ = {'''mobilebert-uncased''': 512} a_ = {} class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : List[Any] = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _A : Tuple = PRETRAINED_INIT_CONFIGURATION _A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : List[Any] = MobileBertTokenizer def __init__(self , lowercase__=None , lowercase__=None , lowercase__=True , lowercase__="[UNK]" , lowercase__="[SEP]" , lowercase__="[PAD]" , lowercase__="[CLS]" , lowercase__="[MASK]" , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( lowercase__ , tokenizer_file=lowercase__ , do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , tokenize_chinese_chars=lowercase__ , strip_accents=lowercase__ , **lowercase__ , ) snake_case_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowercase__ ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowercase__ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowercase__ ) != tokenize_chinese_chars ): snake_case_ : Optional[Any] = getattr(lowercase__ , normalizer_state.pop("""type""" ) ) snake_case_ : Any = do_lower_case snake_case_ : Any = strip_accents snake_case_ : Optional[int] = tokenize_chinese_chars snake_case_ : Optional[int] = normalizer_class(**lowercase__ ) snake_case_ : Optional[int] = do_lower_case def __UpperCamelCase (self , lowercase__ , lowercase__=None ): snake_case_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : Dict = [self.sep_token_id] snake_case_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ )
48
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__=-1 ): # in NER datasets, the last column is usually reserved for NER label snake_case_ : Union[str, Any] = label_idx def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[str] = mode.value snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : Any = [] with open(lowercase__ , encoding="""utf-8""" ) as f: snake_case_ : str = [] snake_case_ : List[Any] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 snake_case_ : Optional[Any] = [] snake_case_ : int = [] else: snake_case_ : Optional[Any] = line.split(""" """ ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(lowercase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Dict = f.read().splitlines() if "O" not in labels: snake_case_ : List[Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Any = f.read().splitlines() if "O" not in labels: snake_case_ : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[Any] = mode.value snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : str = [] with open(lowercase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(lowercase__ ): snake_case_ : Tuple = [] snake_case_ : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , 
labels=lowercase__ ) ) guid_index += 1 return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = 0 for sentence in parse_incr(lowercase__ ): snake_case_ : int = preds_list[example_id] snake_case_ : Dict = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase__ ) example_id += 1 def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
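Input format, for reference: NER reads one token per line with the tag in the last column, Chunk reads the second-to-last column, and POS consumes CoNLL-U files via parse_incr. A hypothetical CoNLL-2003-style fragment:

# train.txt (token POS chunk NER-tag); a blank line or -DOCSTART- ends a sentence:
#
#   EU NNP B-NP B-ORG
#   rejects VBZ B-VP O
#   German JJ B-NP B-MISC
#
# NER(label_idx=-1) yields words=['EU', 'rejects', 'German'] with
# labels=['B-ORG', 'O', 'B-MISC']; Chunk(label_idx=-2) reads the B-NP/B-VP column.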
48
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : list[list[str]] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )] snake_case_ : Optional[Any] = key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1 or len(SCREAMING_SNAKE_CASE__ ) <= key: return input_string for position, character in enumerate(SCREAMING_SNAKE_CASE__ ): snake_case_ : List[Any] = position % (lowest * 2) # puts it in bounds snake_case_ : Dict = min(SCREAMING_SNAKE_CASE__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Union[str, Any] = ["""""".join(SCREAMING_SNAKE_CASE__ ) for row in temp_grid] snake_case_ : Dict = """""".join(SCREAMING_SNAKE_CASE__ ) return output_string def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : List[str] = [] snake_case_ : Optional[int] = key - 1 if key <= 0: raise ValueError("""Height of grid can't be 0 or negative""" ) if key == 1: return input_string snake_case_ : list[list[str]] = [[] for _ in range(SCREAMING_SNAKE_CASE__ )] # generates template for position in range(len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ : Dict = position % (lowest * 2) # puts it in bounds snake_case_ : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append("""*""" ) snake_case_ : Tuple = 0 for row in temp_grid: # fills in the characters snake_case_ : Any = input_string[counter : counter + len(SCREAMING_SNAKE_CASE__ )] grid.append(list(SCREAMING_SNAKE_CASE__ ) ) counter += len(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = """""" # reads as zigzag for position in range(len(SCREAMING_SNAKE_CASE__ ) ): snake_case_ : Dict = position % (lowest * 2) # puts it in bounds snake_case_ : Optional[int] = min(SCREAMING_SNAKE_CASE__ , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : List[str] = {} for key_guess in range(1 , len(SCREAMING_SNAKE_CASE__ ) ): # tries every key snake_case_ : Optional[Any] = decrypt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return results if __name__ == "__main__": import doctest doctest.testmod()
48
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Union[str, Any] = num - 1 snake_case_ : List[str] = 0 while s % 2 == 0: snake_case_ : str = s // 2 t += 1 for _ in range(5 ): snake_case_ : List[Any] = random.randrange(2 , num - 1 ) snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if v != 1: snake_case_ : int = 0 while v != (num - 1): if i == t - 1: return False else: snake_case_ : str = i + 1 snake_case_ : int = (v**2) % num return True def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if num < 2: return False snake_case_ : Dict = [ 2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1, 7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1, 2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ): """simple docstring""" while True: snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(SCREAMING_SNAKE_CASE__ ): return num if __name__ == "__main__": a_ = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
48
1
"""simple docstring""" from math import sqrt def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Tuple = 0 for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) ): if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE__ ): total += i + n // i elif i == sqrt(SCREAMING_SNAKE_CASE__ ): total += i return total - n def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_0_0_0 ): """simple docstring""" snake_case_ : str = sum( i for i in range(1 , SCREAMING_SNAKE_CASE__ ) if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE__ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE__ ) != i ) return total if __name__ == "__main__": print(solution(int(str(input()).strip())))
48
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
1
"""simple docstring""" import re def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" if len(re.findall("""[ATCG]""" , SCREAMING_SNAKE_CASE__ ) ) != len(SCREAMING_SNAKE_CASE__ ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
48
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
48
1
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 4 ): """simple docstring""" snake_case_ : Union[str, Any] = abs(SCREAMING_SNAKE_CASE__ ) or 4 return [[1 + x + y * row_size for x in range(SCREAMING_SNAKE_CASE__ )] for y in range(SCREAMING_SNAKE_CASE__ )] def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" return reverse_row(transpose(SCREAMING_SNAKE_CASE__ ) ) # OR.. transpose(reverse_column(matrix)) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" return reverse_row(reverse_column(SCREAMING_SNAKE_CASE__ ) ) # OR.. reverse_column(reverse_row(matrix)) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" return reverse_column(transpose(SCREAMING_SNAKE_CASE__ ) ) # OR.. transpose(reverse_row(matrix)) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" snake_case_ : str = [list(SCREAMING_SNAKE_CASE__ ) for x in zip(*SCREAMING_SNAKE_CASE__ )] return matrix def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" snake_case_ : str = matrix[::-1] return matrix def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" snake_case_ : Union[str, Any] = [x[::-1] for x in matrix] return matrix def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list[list[int]] ): """simple docstring""" for i in matrix: print(*SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": a_ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 90 counterclockwise:\n''') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 180:\n''') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('''\norigin:\n''') print_matrix(matrix) print('''\nrotate 270 counterclockwise:\n''') print_matrix(rotate_aaa(matrix))
48
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('''fixtures/test_sentencepiece.model''') a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} a_ = '''>>zh<<''' a_ = '''Helsinki-NLP/''' if is_torch_available(): a_ = '''pt''' elif is_tf_available(): a_ = '''tf''' else: a_ = '''jax''' @require_sentencepiece class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = MarianTokenizer _A : List[str] = False _A : List[str] = True def __UpperCamelCase (self ): super().setUp() snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) snake_case_ : Any = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase (self , **lowercase__ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return ( "This is a test", "This is a test", ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """</s>""" snake_case_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def __UpperCamelCase (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) snake_case_ : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : List[str] = tok( ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.get_tokenizer() snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __UpperCamelCase (self ): # fmt: off snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) snake_case_ : Dict = """Tämä on testi""" snake_case_ : List[Any] = """This is a test""" snake_case_ : Optional[int] = [76, 7, 20_47, 2] snake_case_ : List[str] = [69, 12, 11, 9_40, 2] snake_case_ : Any = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
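Outside the test harness, the public API exercised above boils down to a few calls. A usage sketch (it needs the pretrained files from the Hub, and the text_target keyword requires a reasonably recent transformers release):

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(["I am a small frog"], return_tensors="pt", padding=True)
labels = tok(text_target=["Ich bin ein kleiner Frosch"], return_tensors="pt")
print(batch.input_ids.shape, labels.input_ids.shape)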
48
1
"""simple docstring""" import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : dict ): """simple docstring""" return (data["data"], data["target"]) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : np.ndarray ): """simple docstring""" snake_case_ : Any = XGBClassifier() classifier.fit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return classifier def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Any = load_iris() snake_case_ , snake_case_ : Tuple = data_handling(SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ , snake_case_ , snake_case_ : str = train_test_split( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , test_size=0.25 ) snake_case_ : Union[str, Any] = iris["""target_names"""] # Create an XGBoost Classifier from the training data snake_case_ : Optional[int] = xgboost(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , display_labels=SCREAMING_SNAKE_CASE__ , cmap="""Blues""" , normalize="""true""" , ) plt.title("""Normalized Confusion Matrix - IRIS Dataset""" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
48
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _A : ClassVar[Features] = Features({"""audio""": Audio()}) _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")}) _A : str = "audio" _A : str = "transcription" def __UpperCamelCase (self , lowercase__ ): if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase__ ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) snake_case_ : Optional[int] = copy.deepcopy(self ) snake_case_ : Tuple = self.input_schema.copy() snake_case_ : List[str] = features[self.audio_column] snake_case_ : Any = input_schema return task_template @property def __UpperCamelCase (self ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
48
1
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowercase : """simple docstring""" def __init__(self , lowercase__ , lowercase__=99 , lowercase__=13 , lowercase__=16 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=2 , lowercase__=32 , lowercase__=4 , lowercase__=4 , lowercase__=30 , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=None , ): snake_case_ : Tuple = parent snake_case_ : Tuple = batch_size snake_case_ : List[str] = decoder_seq_length # For common tests snake_case_ : List[str] = self.decoder_seq_length snake_case_ : List[Any] = is_training snake_case_ : List[Any] = use_attention_mask snake_case_ : Dict = use_labels snake_case_ : Union[str, Any] = vocab_size snake_case_ : List[str] = d_model snake_case_ : str = d_model snake_case_ : List[str] = decoder_layers snake_case_ : Any = decoder_layers snake_case_ : int = decoder_ffn_dim snake_case_ : Tuple = decoder_attention_heads snake_case_ : Union[str, Any] = decoder_attention_heads snake_case_ : Optional[int] = eos_token_id snake_case_ : Dict = bos_token_id snake_case_ : int = pad_token_id snake_case_ : List[Any] = decoder_start_token_id snake_case_ : Union[str, Any] = use_cache snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = None snake_case_ : Tuple = decoder_seq_length snake_case_ : int = 2 snake_case_ : Optional[Any] = 1 def __UpperCamelCase (self ): snake_case_ : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) snake_case_ : Dict = None if self.use_attention_mask: snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) snake_case_ : Tuple = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) snake_case_ : int = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ): snake_case_ : Optional[int] = True snake_case_ : Optional[int] = TrOCRDecoder(config=lowercase__ ).to(lowercase__ ).eval() snake_case_ : List[str] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass snake_case_ : Tuple = model(lowercase__ , use_cache=lowercase__ ) snake_case_ : Optional[Any] = model(lowercase__ ) snake_case_ : Optional[Any] = model(lowercase__ , use_cache=lowercase__ ) self.parent.assertTrue(len(lowercase__ ) == len(lowercase__ ) ) self.parent.assertTrue(len(lowercase__ ) == len(lowercase__ ) + 1 ) snake_case_ : List[str] = outputs["""past_key_values"""] # create hypothetical 
next token and extent to next_input_ids snake_case_ : int = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and snake_case_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case_ : Optional[Any] = model(lowercase__ )["""last_hidden_state"""] snake_case_ : List[Any] = model(lowercase__ , past_key_values=lowercase__ )["""last_hidden_state"""] # select random slice snake_case_ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case_ : List[str] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() snake_case_ : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(lowercase__ , lowercase__ , atol=1e-3 ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = config_and_inputs snake_case_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_torch class __lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : Optional[int] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () _A : Any = (TrOCRForCausalLM,) if is_torch_available() else () _A : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} _A : Optional[int] = True _A : int = False def __UpperCamelCase (self ): snake_case_ : Dict = TrOCRStandaloneDecoderModelTester(self , is_training=lowercase__ ) snake_case_ : Tuple = ConfigTester(self , config_class=lowercase__ ) def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): self.config_tester.run_common_tests() def __UpperCamelCase (self ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*lowercase__ ) def __UpperCamelCase (self ): return @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def __UpperCamelCase (self ): pass
48
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : int = ["""pixel_values"""] def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24} snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[str] = size snake_case_ : str = crop_pct snake_case_ : str = resample snake_case_ : Optional[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : int = do_rescale snake_case_ : Optional[int] = rescale_factor snake_case_ : str = do_normalize snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ): snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: snake_case_ : Dict = int(size["""height"""] / crop_pct ) else: snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) else: if "shortest_edge" in size: snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ ) elif "height" in size and "width" in size: snake_case_ : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): snake_case_ : int = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ): snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct snake_case_ : List[Any] = resample if resample is not None else self.resample snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : int = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : List[str] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] snake_case_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
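The crop_pct logic in the resize step above enlarges the resize target so that the later center crop retains roughly crop_pct of the shortest edge. The arithmetic for the defaults:

# With size = {"shortest_edge": 224} and crop_pct = 0.9, the image is first
# resized so its shortest edge is int(224 / 0.9) = 248, then center-cropped
# back to 224x224, keeping about 90% of the resized shortest edge.
size, crop_pct = 224, 0.9
resize_target = int(size / crop_pct)
assert resize_target == 248
assert abs(size / resize_target - crop_pct) < 0.01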
48
1
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" for param in module.parameters(): snake_case_ : Tuple = False def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[Any] = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): snake_case_ : Optional[int] = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Union[str, Any] = plt.imshow(SCREAMING_SNAKE_CASE__ ) fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE__ ) fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE__ ) plt.show() def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = datetime.now() snake_case_ : int = current_time.strftime("""%H:%M:%S""" ) return timestamp
48
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : str = ["""input_ids""", """attention_mask"""] _A : Tuple = MBartTokenizer _A : List[int] = [] _A : List[int] = [] def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , ) snake_case_ : Dict = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) snake_case_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX""" snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase (self ): return self._src_lang @src_lang.setter def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) snake_case_ : int = src_lang snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ): snake_case_ : List[str] = src_lang snake_case_ : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase (self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Tuple = [] snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return snake_case_ : List[str] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
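End-to-end translation with this tokenizer follows the standard MBart pattern. A sketch (the model download is required, and decoder_start_token_id is the documented way to force the target language on these en-ro checkpoints):

from transformers import MBartForConditionalGeneration, MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
batch = tokenizer("UN Chief Says There Is No Plan to Stop War in Syria", return_tensors="pt")
generated = model.generate(**batch, decoder_start_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))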
48
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : int = SwinvaConfig() snake_case_ : Union[str, Any] = swinva_name.split("""_""" ) snake_case_ : List[Any] = name_split[1] if "to" in name_split[3]: snake_case_ : Union[str, Any] = int(name_split[3][-3:] ) else: snake_case_ : Optional[int] = int(name_split[3] ) if "to" in name_split[2]: snake_case_ : Dict = int(name_split[2][-2:] ) else: snake_case_ : int = int(name_split[2][6:] ) if model_size == "tiny": snake_case_ : str = 9_6 snake_case_ : Dict = (2, 2, 6, 2) snake_case_ : int = (3, 6, 1_2, 2_4) elif model_size == "small": snake_case_ : Tuple = 9_6 snake_case_ : Optional[Any] = (2, 2, 1_8, 2) snake_case_ : Optional[int] = (3, 6, 1_2, 2_4) elif model_size == "base": snake_case_ : Dict = 1_2_8 snake_case_ : str = (2, 2, 1_8, 2) snake_case_ : List[Any] = (4, 8, 1_6, 3_2) else: snake_case_ : Tuple = 1_9_2 snake_case_ : Tuple = (2, 2, 1_8, 2) snake_case_ : Optional[Any] = (6, 1_2, 2_4, 4_8) if "to" in swinva_name: snake_case_ : str = (1_2, 1_2, 1_2, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): snake_case_ : str = 2_1_8_4_1 snake_case_ : str = """huggingface/label-files""" snake_case_ : List[str] = """imagenet-22k-id2label.json""" snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ : Optional[int] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} snake_case_ : int = idalabel snake_case_ : Optional[Any] = {v: k for k, v in idalabel.items()} else: snake_case_ : int = 1_0_0_0 snake_case_ : List[Any] = """huggingface/label-files""" snake_case_ : List[Any] = """imagenet-1k-id2label.json""" snake_case_ : Union[str, Any] = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ : Union[str, Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()} snake_case_ : Dict = idalabel snake_case_ : Optional[int] = {v: k for k, v in idalabel.items()} snake_case_ : Dict = img_size snake_case_ : int = num_classes snake_case_ : str = embed_dim snake_case_ : Dict = depths snake_case_ : int = num_heads snake_case_ : Optional[Any] = window_size return config def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if "patch_embed.proj" in name: snake_case_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: snake_case_ : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: snake_case_ : List[str] = """encoder.""" + name if "attn.proj" in name: snake_case_ : Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case_ : Dict = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case_ : List[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case_ : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case_ : int = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case_ : List[Any] = name.replace("""mlp.fc2""" , 
"""output.dense""" ) if "q_bias" in name: snake_case_ : Optional[Any] = name.replace("""q_bias""" , """query.bias""" ) if "k_bias" in name: snake_case_ : List[Any] = name.replace("""k_bias""" , """key.bias""" ) if "v_bias" in name: snake_case_ : int = name.replace("""v_bias""" , """value.bias""" ) if "cpb_mlp" in name: snake_case_ : str = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" ) if name == "norm.weight": snake_case_ : Tuple = """layernorm.weight""" if name == "norm.bias": snake_case_ : int = """layernorm.bias""" if "head" in name: snake_case_ : Union[str, Any] = name.replace("""head""" , """classifier""" ) else: snake_case_ : Optional[Any] = """swinv2.""" + name return name def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" for key in orig_state_dict.copy().keys(): snake_case_ : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ ) if "mask" in key: continue elif "qkv" in key: snake_case_ : List[Any] = key.split(""".""" ) snake_case_ : str = int(key_split[1] ) snake_case_ : Optional[Any] = int(key_split[3] ) snake_case_ : List[Any] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case_ : Any = val[:dim, :] snake_case_ : Optional[Any] = val[dim : dim * 2, :] snake_case_ : int = val[-dim:, :] else: snake_case_ : List[str] = val[:dim] snake_case_ : Tuple = val[ dim : dim * 2 ] snake_case_ : Union[str, Any] = val[-dim:] else: snake_case_ : int = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Optional[Any] = timm.create_model(SCREAMING_SNAKE_CASE__ , pretrained=SCREAMING_SNAKE_CASE__ ) timm_model.eval() snake_case_ : Any = get_swinva_config(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = SwinvaForImageClassification(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Any = convert_state_dict(timm_model.state_dict() , SCREAMING_SNAKE_CASE__ ) model.load_state_dict(SCREAMING_SNAKE_CASE__ ) snake_case_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) ) snake_case_ : Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ) snake_case_ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ) snake_case_ : Optional[int] = timm_model(inputs["""pixel_values"""] ) snake_case_ : List[str] = model(**SCREAMING_SNAKE_CASE__ ).logits assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) print(f'Saving model {swinva_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) model.push_to_hub( repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="""nandwalritik""" , commit_message="""Add model""" , ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swinv2_name''', default='''swinv2_tiny_patch4_window8_256''', type=str, help='''Name of the Swinv2 timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) a_ = 
parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
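The heart of convert_state_dict above is slicing a fused qkv projection into separate query, key and value tensors. A toy sketch of that slicing (the size 4 is an arbitrary stand-in for the model's real all_head_size):

import torch

d = 4  # toy hidden size
qkv_weight = torch.randn(3 * d, d)  # fused projection: q, k and v stacked row-wise
q, k, v = qkv_weight[:d, :], qkv_weight[d : 2 * d, :], qkv_weight[-d:, :]
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)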
48
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class __lowercase : """simple docstring""" def __init__(self , lowercase__ ): snake_case_ : Union[str, Any] = data snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def __UpperCamelCase (lowercase__ , lowercase__ ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def __UpperCamelCase (self ): snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def __UpperCamelCase (self ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64 for i in range(16 , 80 ): snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __UpperCamelCase (self ): snake_case_ : List[Any] = self.padding() snake_case_ : Any = self.split_blocks() for block in self.blocks: snake_case_ : Any = self.expand_block(lowercase__ ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h for i in range(0 , 80 ): if 0 <= i < 20: snake_case_ : Optional[Any] = (b & c) | ((~b) & d) snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: snake_case_ : Union[str, Any] = b ^ c ^ d snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: snake_case_ : str = (b & c) | (b & d) | (c & d) snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: snake_case_ : Tuple = b ^ c ^ d snake_case_ : str = 0Xc_a_6_2_c_1_d_6 snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = ( self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(lowercase__ , 30 ), c, d, ) snake_case_ : Any = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = b"""Test String""" assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324 def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) snake_case_ : Optional[int] = parser.parse_args() snake_case_ : Optional[int] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: snake_case_ : List[str] = f.read() else: snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" ) print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
48
1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) snake_case_ : int = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 4_2 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ), ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : DatasetInfo ): """simple docstring""" snake_case_ : Optional[int] = str(SCREAMING_SNAKE_CASE__ ) dataset_info.write_to_directory(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = DatasetInfo.from_directory(SCREAMING_SNAKE_CASE__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """dataset_info.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Dict = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , ) snake_case_ : Optional[Any] = dataset_info._to_yaml_dict() assert sorted(SCREAMING_SNAKE_CASE__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ : List[Any] = yaml.safe_dump(SCREAMING_SNAKE_CASE__ ) snake_case_ : Union[str, Any] = yaml.safe_load(SCREAMING_SNAKE_CASE__ ) assert dataset_info_yaml_dict == reloaded def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : str = DatasetInfo() snake_case_ : Optional[int] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , 
config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=4_2 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=4_2 ), """v2""": DatasetInfo(dataset_size=1_3_3_7 ), } ), ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : DatasetInfosDict ): """simple docstring""" snake_case_ : Dict = str(SCREAMING_SNAKE_CASE__ ) dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): snake_case_ : Optional[int] = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ : List[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """README.md""" ) )
48
"""simple docstring""" from manim import * class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : str = [mem.copy() for i in range(6 )] snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[Any] = Text("""CPU""" , font_size=24 ) snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase__ ) snake_case_ : List[Any] = [mem.copy() for i in range(4 )] snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = Text("""GPU""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase__ ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Dict = Text("""Model""" , font_size=24 ) snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) model.move_to([3, -1.0, 0] ) self.add(lowercase__ ) snake_case_ : Dict = [] for i, rect in enumerate(lowercase__ ): rect.set_stroke(lowercase__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 ) self.add(lowercase__ ) cpu_targs.append(lowercase__ ) snake_case_ : List[str] = [mem.copy() for i in range(6 )] snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) snake_case_ : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case_ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase__ , lowercase__ ) snake_case_ : List[Any] = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) snake_case_ : List[Any] = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase__ ) , Write(lowercase__ ) ) self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) ) snake_case_ : Optional[int] = [] snake_case_ : List[str] = [] for i, rect in enumerate(lowercase__ ): snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 ) target.move_to(lowercase__ ) first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) ) snake_case_ : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) ) self.play(*lowercase__ ) self.play(*lowercase__ ) self.wait()
48
1
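A minimal standalone sketch of the round-trip the DatasetInfosDict test above exercises, assuming a `datasets` version where `DatasetInfosDict` is importable from `datasets.info` and infos are serialized into README.md YAML metadata:

import tempfile

from datasets.info import DatasetInfo, DatasetInfosDict

with tempfile.TemporaryDirectory() as tmp_dir:
    infos = DatasetInfosDict({"default": DatasetInfo(dataset_size=42)})
    infos.write_to_directory(tmp_dir)  # serializes the infos into README.md
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
    assert reloaded["default"].dataset_size == 42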
"""simple docstring""" import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP a_ = False try: a_ = _is_package_available('''google.colab''') except ModuleNotFoundError: pass @input.register class __lowercase : """simple docstring""" def __init__(self , lowercase__ = None , lowercase__ = [] ): snake_case_ : str = 0 snake_case_ : Dict = choices snake_case_ : Dict = prompt if sys.platform == "win32": snake_case_ : List[Any] = """*""" else: snake_case_ : Optional[Any] = """➔ """ def __UpperCamelCase (self , lowercase__ , lowercase__ = "" ): if sys.platform != "win32": writeColor(self.choices[index] , 32 , lowercase__ ) else: forceWrite(self.choices[index] , lowercase__ ) def __UpperCamelCase (self , lowercase__ ): if index == self.position: forceWrite(f' {self.arrow_char} ' ) self.write_choice(lowercase__ ) else: forceWrite(f' {self.choices[index]}' ) reset_cursor() def __UpperCamelCase (self , lowercase__ , lowercase__ = 1 ): snake_case_ : Optional[Any] = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(lowercase__ ) move_cursor(lowercase__ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["""up"""] ) def __UpperCamelCase (self ): self.move_direction(Direction.UP ) @input.mark(KEYMAP["""down"""] ) def __UpperCamelCase (self ): self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["""newline"""] ) def __UpperCamelCase (self ): move_cursor(len(self.choices ) - self.position , """DOWN""" ) return self.position @input.mark(KEYMAP["""interrupt"""] ) def __UpperCamelCase (self ): move_cursor(len(self.choices ) - self.position , """DOWN""" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(lowercase__ )] for number in range(10 )] ) def __UpperCamelCase (self ): snake_case_ : Dict = int(chr(self.current_selection ) ) snake_case_ : Optional[int] = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , lowercase__ ) else: return else: return def __UpperCamelCase (self , lowercase__ = 0 ): if self.prompt: linebreak() forceWrite(self.prompt , """\n""" ) if in_colab: forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" ) else: forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" ) snake_case_ : Dict = default_choice for i in range(len(self.choices ) ): self.print_choice(lowercase__ ) forceWrite("""\n""" ) move_cursor(len(self.choices ) - self.position , """UP""" ) with cursor.hide(): while True: if in_colab: try: snake_case_ : List[str] = int(builtins.input() ) except ValueError: snake_case_ : Optional[int] = default_choice else: snake_case_ : List[str] = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , """UP""" ) clear_line() self.write_choice(lowercase__ , """\n""" ) return choice
48
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = 0 if start < end: snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = a[end] snake_case_ : Dict = a[pivot] snake_case_ : Any = temp snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ ) return count def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Tuple = 0 snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = a[end] snake_case_ : List[Any] = a[pivot] snake_case_ : Optional[Any] = temp snake_case_ : List[str] = start - 1 for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value snake_case_ : Any = new_pivot_index + 1 snake_case_ : Tuple = a[new_pivot_index] snake_case_ : Optional[int] = a[index] snake_case_ : Tuple = temp snake_case_ : Union[str, Any] = a[new_pivot_index + 1] snake_case_ : Union[str, Any] = a[end] snake_case_ : Union[str, Any] = temp return new_pivot_index + 1, count a_ = TemporaryFile() a_ = 100 # 1000 elements are to be sorted a_ , a_ = 0, 1 # mean and standard deviation a_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a_ = np.load(outfile) a_ = len(M) - 1 a_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
48
1
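A quick sanity check for the comparison-counting quicksort above, using the function names as defined in that snippet; for n random elements the count should grow roughly like n log n on average:

data = [9, 2, 7, 4, 1, 8]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert data == [1, 2, 4, 7, 8, 9]  # sorted in place
print(f"sorted 6 elements with {comparisons} comparisons")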
"""simple docstring""" import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process a_ = logging.getLogger(__name__) a_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) a_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __lowercase : """simple docstring""" _A : Optional[str] = field( default=_UpperCAmelCase , metadata={ """help""": ( """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch.""" ) } , ) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCAmelCase)} , ) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={ """help""": ( """Override some existing default config settings when a model is trained from scratch. Example: """ """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index""" ) } , ) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) _A : bool = field( default=_UpperCAmelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) _A : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) _A : bool = field( default=_UpperCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) def __UpperCamelCase (self ): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class __lowercase : """simple docstring""" _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) _A : Optional[str] = field(default=_UpperCAmelCase , metadata={"""help""": """The input training data file (a text file)."""}) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , ) _A : Optional[str] = field( default=_UpperCAmelCase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , 
) _A : bool = field( default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""}) _A : Optional[int] = field( default=5 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) _A : Optional[int] = field( default=_UpperCAmelCase , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated. Default to the max input length of the model.""" ) } , ) _A : Optional[int] = field( default=_UpperCAmelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) _A : float = field( default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""}) _A : bool = field( default=_UpperCAmelCase , metadata={ """help""": ( """Whether to pad all samples to `max_seq_length`. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch.""" ) } , ) def __UpperCamelCase (self ): if self.train_file is not None: snake_case_ : Optional[int] = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: snake_case_ : List[str] = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" ) as f: snake_case_ : Dict = [json.loads(SCREAMING_SNAKE_CASE__ ) for line in f.read().splitlines() if (len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace())] assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = {c: dataset[c] for c in dataset.column_names} snake_case_ : Union[str, Any] = refs return Dataset.from_dict(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ , snake_case_ , snake_case_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ , snake_case_ , snake_case_ : List[str] = parser.parse_args_into_dataclasses() # Detecting last checkpoint. snake_case_ : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case_ : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__ ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. snake_case_ : Tuple = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): snake_case_ : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , ) snake_case_ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , ) else: snake_case_ : int = {} if data_args.train_file is not None: snake_case_ : Any = data_args.train_file if data_args.validation_file is not None: snake_case_ : str = data_args.validation_file snake_case_ : Optional[int] = data_args.train_file.split(""".""" )[-1] if extension == "txt": snake_case_ : Union[str, Any] = """text""" snake_case_ : Dict = load_dataset(SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Any = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: snake_case_ : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: snake_case_ : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: snake_case_ : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(f'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(f'New config: {config}' ) snake_case_ : Any = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: snake_case_ : Dict = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **SCREAMING_SNAKE_CASE__ ) elif model_args.model_name_or_path: snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: snake_case_ : List[str] = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) snake_case_ : Optional[Any] = AutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE__ ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) ) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: snake_case_ : List[Any] = datasets["""train"""].column_names else: snake_case_ : Tuple = datasets["""validation"""].column_names snake_case_ : Union[str, Any] = """text""" if """text""" in column_names else column_names[0] snake_case_ : Optional[Any] = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(SCREAMING_SNAKE_CASE__ : Any ): # Remove empty lines snake_case_ : List[Any] = [line for line in examples["""text"""] if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=data_args.max_seq_length ) snake_case_ : Any = datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: snake_case_ : List[Any] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: snake_case_ : Any = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer snake_case_ : str = data_args.train_ref_file or data_args.validation_ref_file if has_ref: snake_case_ : List[str] = False # Data collator # This one will take care of randomly masking the tokens. snake_case_ : List[Any] = DataCollatorForWholeWordMask(tokenizer=SCREAMING_SNAKE_CASE__ , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer snake_case_ : str = Trainer( model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , ) # Training if training_args.do_train: if last_checkpoint is not None: snake_case_ : Optional[int] = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): snake_case_ : str = model_args.model_name_or_path else: snake_case_ : Tuple = None snake_case_ : int = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) trainer.save_model() # Saves the tokenizer too for easy upload snake_case_ : Optional[Any] = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation snake_case_ : Tuple = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) snake_case_ : List[Any] = trainer.evaluate() snake_case_ : Optional[Any] = math.exp(eval_output["""eval_loss"""] ) snake_case_ : List[str] = perplexity snake_case_ : List[Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): 
logger.info(f' {key} = {value}' ) writer.write(f'{key} = {value}\n' ) return results def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" main() if __name__ == "__main__": main()
48
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ): """simple docstring""" snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(SCREAMING_SNAKE_CASE__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ): if random.random() < probability: graph[i].append(SCREAMING_SNAKE_CASE__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(SCREAMING_SNAKE_CASE__ ) return graph def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return { i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ ) } if __name__ == "__main__": import doctest doctest.testmod()
48
1
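A short usage sketch for the `random_graph` generator above; with `directed=False` every edge must appear in both adjacency lists:

import random

random.seed(0)  # make the sketch reproducible
g = random_graph(5, 0.5)
for u, neighbours in g.items():
    for v in neighbours:
        assert u in g[v]  # undirected: the reverse edge exists too
print(g)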
"""simple docstring""" import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowercase : """simple docstring""" def __init__(self , lowercase__ , lowercase__=13 , lowercase__=30 , lowercase__=2 , lowercase__=3 , lowercase__=True , lowercase__=True , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10 , lowercase__=0.02 , lowercase__=None , ): snake_case_ : Dict = parent snake_case_ : List[Any] = batch_size snake_case_ : str = image_size snake_case_ : Any = patch_size snake_case_ : str = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : Union[str, Any] = use_labels snake_case_ : Optional[int] = hidden_size snake_case_ : Optional[int] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : List[Any] = intermediate_size snake_case_ : Dict = hidden_act snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : List[Any] = type_sequence_label_size snake_case_ : List[Any] = initializer_range snake_case_ : Tuple = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case_ : Dict = (image_size // patch_size) ** 2 snake_case_ : List[Any] = num_patches + 1 def __UpperCamelCase (self ): snake_case_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : str = None if self.use_labels: snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Any = self.get_config() return config, pixel_values, labels def __UpperCamelCase (self ): return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[int] = ViTMSNModel(config=A__ ) model.to(A__ ) model.eval() snake_case_ : int = model(A__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = self.type_sequence_label_size snake_case_ : Optional[int] = ViTMSNForImageClassification(A__ ) model.to(A__ ) model.eval() snake_case_ : List[Any] = model(A__ , labels=A__ ) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" ) print("""Labels: {labels}""" ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : Optional[int] = 1 snake_case_ : Union[str, Any] = ViTMSNForImageClassification(A__ ) model.to(A__ ) model.eval() snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : str = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __UpperCamelCase (self ): snake_case_ : int = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs snake_case_ : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): """simple docstring""" _A : int = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () _A : Union[str, Any] = ( {"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification} if is_torch_available() else {} ) _A : List[Any] = False _A : List[str] = False _A : Optional[Any] = False _A : int = False def __UpperCamelCase (self ): snake_case_ : Dict = ViTMSNModelTester(self ) snake_case_ : int = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 ) def __UpperCamelCase (self ): self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""" ) def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(A__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A__ , nn.Linear ) ) def __UpperCamelCase (self ): snake_case_ , snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(A__ ) snake_case_ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[Any] = [*signature.parameters.keys()] snake_case_ : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A__ ) def __UpperCamelCase (self ): snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A__ ) @slow def __UpperCamelCase (self ): for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Any = ViTMSNModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __lowercase ( unittest.TestCase): """simple docstring""" @cached_property def __UpperCamelCase (self ): return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None @slow def __UpperCamelCase (self ): torch.manual_seed(2 ) snake_case_ : Optional[int] = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(A__ ) snake_case_ : List[Any] = self.default_image_processor snake_case_ 
: Optional[int] = prepare_img() snake_case_ : Optional[int] = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ ) # forward pass with torch.no_grad(): snake_case_ : Any = model(**A__ ) # verify the logits snake_case_ : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , A__ ) snake_case_ : Union[str, Any] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(A__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
700
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """dpr""" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ): super().__init__(pad_token_id=lowercase__ , **lowercase__ ) snake_case_ : List[Any] = vocab_size snake_case_ : List[str] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : int = hidden_act snake_case_ : Dict = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Union[str, Any] = projection_dim snake_case_ : str = position_embedding_type
48
0
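A minimal sketch of the inference path the ViT-MSN integration test above verifies; the model id comes from the test itself, while the image path is a placeholder, not a file from the repo:

import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
image = Image.open("cats.png")  # placeholder: any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000]), as asserted in the test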
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" with open(__A , """r""" ) as fh: fcntl.flock(__A , fcntl.LOCK_EX ) try: print(*__A ) finally: fcntl.flock(__A , fcntl.LOCK_UN ) a_ = int(os.environ['''LOCAL_RANK''']) torch.cuda.set_device(local_rank) a_ = torch.device('''cuda''', local_rank) a_ = socket.gethostname() a_ = F'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('''nccl''') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank a_ = dist.get_rank() a_ = dist.get_world_size() printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(F'''{gpu} is broken''') raise
701
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm a_ = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex a_ = 10 a_ = 256 def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS: return None snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ ) for token in set(SCREAMING_SNAKE_CASE__ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0} class __lowercase : """simple docstring""" def __init__(self , *, lowercase__ = 0.85 , ): snake_case_ : Tuple = duplication_jaccard_threshold snake_case_ : Optional[Any] = NUM_PERM snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) snake_case_ : List[Any] = defaultdict(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : int = self._index.query(lowercase__ ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(lowercase__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : str = [] for base, duplicates in self._duplicate_clusters.items(): snake_case_ : Optional[Any] = [base] + list(lowercase__ ) # reformat the cluster to be a list of dict snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(lowercase__ ) return duplicate_clusters def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.get_duplicate_clusters() with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ , snake_case_ : str = element snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ): """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ): """simple docstring""" snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ): di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) a_ = None def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = [] for elementa in cluster: snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: snake_case_ : Union[str, Any] = 1 extremes.append(SCREAMING_SNAKE_CASE__ ) return extremes def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" global _shared_dataset snake_case_ : str = dataset snake_case_ : int = [] snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ): extremes_list.append(SCREAMING_SNAKE_CASE__ ) return extremes_list def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ): """simple docstring""" snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} snake_case_ : str = {} snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for extremes in extremes_clusters: for element in extremes: snake_case_ : int = element snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() ) snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: snake_case_ : List[Any] = element["""base_index"""] in extreme_dict if element["is_extreme"]: snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""] print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) return ds_filter, duplicate_clusters
48
0
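A small sketch of the `datasketch` primitives the deduplication index above is built on: hash a token set with `MinHash`, index it in `MinHashLSH`, and query for near-duplicates above the Jaccard threshold:

from datasketch import MinHash, MinHashLSH

def min_hash(tokens, num_perm=256):
    m = MinHash(num_perm=num_perm)
    for t in set(tokens):
        m.update(t.encode())
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=256)
code = "def add(a, b): return a + b"
lsh.insert("doc_a", min_hash(code.split()))
print(lsh.query(min_hash(code.split())))  # ['doc_a']: an exact duplicate is always found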
"""simple docstring""" import collections import os import re from pathlib import Path a_ = 'src/transformers' # Matches is_xxx_available() a_ = re.compile(r'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} a_ = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a_ = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available a_ = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") a_ = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a_ = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", a_ = re.compile(r'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], a_ = re.compile(r'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo a_ = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: a_ = re.compile(r'''^\s*try:''') # Catches a line with else: a_ = re.compile(r'''^\s*else:''') def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None: return None snake_case_ : List[str] = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )] backends.sort() return "_and_".join(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" with open(_SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : str = 0 while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_SCREAMING_SNAKE_CASE ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : List[Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : int = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ): snake_case_ : Union[str, Any] = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0] snake_case_ : List[str] = re.findall(R"""\[([^\]]+)\]""" , _SCREAMING_SNAKE_CASE ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : int = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: snake_case_ : Union[str, Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : int = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Any = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : List[Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : int = lines[line_index] if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None: snake_case_ : str = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ ) snake_case_ : Dict = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None: snake_case_ : Any = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(""", """ ) snake_case_ : int = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0] objects.extend(_SCREAMING_SNAKE_CASE ) elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None: objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 1_2 + """\"""" ): objects.append(line[1_3:-3] ) line_index += 1 snake_case_ : str = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : Dict = [] while ( line_index < len(_SCREAMING_SNAKE_CASE ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : List[str] = lines[line_index] snake_case_ : str = _re_import.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : List[str] = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(_SCREAMING_SNAKE_CASE ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : List[str] = lines[line_index] snake_case_ : List[Any] = _re_import.search(_SCREAMING_SNAKE_CASE ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 snake_case_ : Union[str, Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" def find_duplicates(SCREAMING_SNAKE_CASE__ : Dict ): return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Dict = [] for key in import_dict_objects.keys(): snake_case_ : Tuple = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' ) snake_case_ : Tuple = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else f'{key} backend' errors.append(f'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f' {a} in TYPE_HINT but not in _import_structure.' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f' {a} in _import_structure but not in TYPE_HINT.' 
) return errors def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Tuple = [] for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ): if "__init__.py" in files: snake_case_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) snake_case_ : str = parse_init(_SCREAMING_SNAKE_CASE ) if objects is not None: snake_case_ : Optional[Any] = analyze_results(*_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: snake_case_ : str = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append("""\n""".join(_SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) > 0: raise ValueError("""\n\n""".join(_SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = [] for path, directories, files in os.walk(_SCREAMING_SNAKE_CASE ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(_SCREAMING_SNAKE_CASE ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_SCREAMING_SNAKE_CASE ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : int = str((Path(_SCREAMING_SNAKE_CASE ) / folder).relative_to(_SCREAMING_SNAKE_CASE ) ) snake_case_ : Union[str, Any] = short_path.replace(os.path.sep , """.""" ) submodules.append(_SCREAMING_SNAKE_CASE ) for fname in files: if fname == "__init__.py": continue snake_case_ : Tuple = str((Path(_SCREAMING_SNAKE_CASE ) / fname).relative_to(_SCREAMING_SNAKE_CASE ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(_SCREAMING_SNAKE_CASE ) return submodules a_ = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" from transformers.utils import direct_transformers_import snake_case_ : Optional[int] = direct_transformers_import(_SCREAMING_SNAKE_CASE ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(_SCREAMING_SNAKE_CASE , """__init__.py""" ) , """r""" ) as f: snake_case_ : Any = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , _SCREAMING_SNAKE_CASE ) ) ) snake_case_ : List[Any] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(_SCREAMING_SNAKE_CASE ) > 0: snake_case_ : str = """\n""".join(f'- {module}' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" f'{list_of_modules}\n' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
702
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) a_ = logging.getLogger(__name__) if __name__ == "__main__": a_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=30522, type=int) a_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: a_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') a_ = Counter() for tk_ids in data: counter.update(tk_ids) a_ = [0] * args.vocab_size for k, v in counter.items(): a_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
48
0
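The token-counting script above reduces to a few lines of stdlib; a toy version with two BERT-style sequences (101 and 102 are the usual [CLS]/[SEP] ids for bert-base-uncased):

from collections import Counter

vocab_size = 30522
data = [[101, 2009, 102], [101, 2003, 2009, 102]]  # toy token-id sequences
counter = Counter()
for tk_ids in data:
    counter.update(tk_ids)
counts = [0] * vocab_size
for k, v in counter.items():
    counts[k] = v
print(counts[101], counts[2009])  # 2 2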
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
703
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Optional[Any] = tmp_path / """cache""" snake_case_ : Optional[int] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : int = {"""text""": """string"""} snake_case_ : Any = features.copy() if features else default_expected_features snake_case_ : List[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : Union[str, Any] = tmp_path / """cache""" snake_case_ : Optional[Any] = {"""text""": """string"""} snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = text_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : str = [text_path] snake_case_ : List[str] = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , 
SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: snake_case_ : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : int = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : int = features.copy() if features else default_expected_features snake_case_ : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if split: snake_case_ : Union[str, Any] = {split: text_path} else: snake_case_ : Union[str, Any] = """train""" snake_case_ : int = {"""train""": text_path, """test""": text_path} snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : Tuple = {"""text""": """string"""} snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): snake_case_ : Union[str, Any] = np.max(_outputs , axis=-1 , keepdims=_lowercase ) snake_case_ : List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowercase ) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = """sigmoid""" _A : Union[str, Any] = """softmax""" _A : int = """none""" @add_end_docstrings( _UpperCAmelCase , R""" return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `\"default\"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `\"sigmoid\"`: Applies the sigmoid function on the output. - `\"softmax\"`: Applies the softmax function on the output. - `\"none\"`: Does not apply any function on the output. """ , ) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[Any] = False _A : Optional[int] = ClassificationFunction.NONE def __init__(self , **lowercase__ ): super().__init__(**UpperCamelCase_ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __UpperCamelCase (self , lowercase__=None , lowercase__=None , lowercase__="" , **lowercase__ ): snake_case_ : Optional[int] = tokenizer_kwargs snake_case_ : Optional[Any] = {} if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None: snake_case_ : Union[str, Any] = self.model.config.return_all_scores if isinstance(UpperCamelCase_ , UpperCamelCase_ ) or top_k is None: snake_case_ : List[Any] = top_k snake_case_ : Tuple = False elif return_all_scores is not None: warnings.warn( """`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of""" """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , UpperCamelCase_ , ) if return_all_scores: snake_case_ : Optional[int] = None else: snake_case_ : int = 1 if isinstance(UpperCamelCase_ , UpperCamelCase_ ): snake_case_ : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: snake_case_ : Optional[Any] = function_to_apply return preprocess_params, {}, postprocess_params def __call__(self , *lowercase__ , **lowercase__ ): snake_case_ : Optional[int] = super().__call__(*UpperCamelCase_ , **UpperCamelCase_ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
snake_case_ : Union[str, Any] = 'top_k' not in kwargs if isinstance(args[0] , UpperCamelCase_ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __UpperCamelCase (self , lowercase__ , **lowercase__ ): snake_case_ : Optional[Any] = self.framework if isinstance(UpperCamelCase_ , UpperCamelCase_ ): return self.tokenizer(**UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) == 1 and isinstance(inputs[0] , UpperCamelCase_ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a""" """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" ) return self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) def __UpperCamelCase (self , lowercase__ ): return self.model(**UpperCamelCase_ ) def __UpperCamelCase (self , lowercase__ , lowercase__=None , lowercase__=1 , lowercase__=True ): if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: snake_case_ : Any = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: snake_case_ : str = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None: snake_case_ : List[Any] = self.model.config.function_to_apply else: snake_case_ : List[str] = ClassificationFunction.NONE snake_case_ : Optional[Any] = model_outputs['logits'][0] snake_case_ : int = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: snake_case_ : Union[str, Any] = sigmoid(UpperCamelCase_ ) elif function_to_apply == ClassificationFunction.SOFTMAX: snake_case_ : List[Any] = softmax(UpperCamelCase_ ) elif function_to_apply == ClassificationFunction.NONE: snake_case_ : List[str] = outputs else: raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} snake_case_ : Union[str, Any] = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(UpperCamelCase_ ) ] if not _legacy: dict_scores.sort(key=lambda lowercase__ : x["score"] , reverse=UpperCamelCase_ ) if top_k is not None: snake_case_ : Any = dict_scores[:top_k] return dict_scores
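# Usage sketch for the text-classification pipeline defined above. It assumes a
# default checkpoint can be downloaded; printed outputs are illustrative.
from transformers import pipeline

classifier = pipeline("text-classification")

# Legacy single-label behaviour: a list with one {"label", "score"} dict.
print(classifier("This movie was great!"))

# All class scores; replaces the deprecated `return_all_scores=True`.
print(classifier("This movie was great!", top_k=None))

# Override the post-processing function chosen from the model config.
print(classifier("This movie was great!", function_to_apply="sigmoid"))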
"""simple docstring""" from copy import deepcopy class __lowercase : """simple docstring""" def __init__(self , lowercase__ = None , lowercase__ = None ): if arr is None and size is not None: snake_case_ : str = size snake_case_ : Optional[Any] = [0] * size elif arr is not None: self.init(lowercase__ ) else: raise ValueError("""Either arr or size must be specified""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[Any] = len(lowercase__ ) snake_case_ : int = deepcopy(lowercase__ ) for i in range(1 , self.size ): snake_case_ : Optional[Any] = self.next_(lowercase__ ) if j < self.size: self.tree[j] += self.tree[i] def __UpperCamelCase (self ): snake_case_ : Dict = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case_ : Optional[int] = self.next_(lowercase__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def __UpperCamelCase (lowercase__ ): return index + (index & (-index)) @staticmethod def __UpperCamelCase (lowercase__ ): return index - (index & (-index)) def __UpperCamelCase (self , lowercase__ , lowercase__ ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case_ : Tuple = self.next_(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): self.add(lowercase__ , value - self.get(lowercase__ ) ) def __UpperCamelCase (self , lowercase__ ): if right == 0: return 0 snake_case_ : List[str] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case_ : Optional[int] = self.prev(lowercase__ ) return result def __UpperCamelCase (self , lowercase__ , lowercase__ ): return self.prefix(lowercase__ ) - self.prefix(lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return self.query(lowercase__ , index + 1 ) def __UpperCamelCase (self , lowercase__ ): value -= self.tree[0] if value < 0: return -1 snake_case_ : Tuple = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case_ : Tuple = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge a_ = [ "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the" " final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe" " depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.", "The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal" " accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's" " founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the" " body.", "Amnesty International releases its annual report on the death penalty. The report catalogs the use of" " state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the" " world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital" " punishment.", ] a_ = [ "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ." " Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz" " had informed his Lufthansa training school of an episode of severe depression, airline says .", "Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ." " Israel and the United States opposed the move, which could open the door to war crimes investigations against" " Israelis .", "Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to" " death . Organization claims that governments around the world are using the threat of terrorism to advance" " executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death" " sentences up by 28% .", ] def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["""rouge2""", """rougeL"""] ) assert isinstance(_A , _A ) snake_case_ : Any = calculate_rouge(_A , _A , bootstrap_aggregation=_A , rouge_keys=["""rouge2"""] ) assert ( pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean() ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Optional[int] = "rougeLsum" snake_case_ : Tuple = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k] snake_case_ : Union[str, Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=[k] )[k] assert score > score_no_sep def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[Any] = ["rouge1", "rouge2", "rougeL"] snake_case_ : Union[str, Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A ) snake_case_ : List[Any] = calculate_rouge(_A , _A , newline_sep=_A , rouge_keys=_A ) assert score_sep == score_no_sep def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Optional[int] = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] snake_case_ : int = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(_A , _A , newline_sep=_A ) == calculate_rouge(_A , _A , newline_sep=_A ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Optional[Any] = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] snake_case_ : Dict = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." ] snake_case_ : Tuple = calculate_rouge(_A , _A , rouge_keys=["""rougeLsum"""] , newline_sep=_A )["rougeLsum"] snake_case_ : Optional[int] = calculate_rouge(_A , _A , rouge_keys=["""rougeLsum"""] )["rougeLsum"] assert new_score > prev_score def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = Path("""examples/seq2seq/test_data/wmt_en_ro""" ) snake_case_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) ) assert isinstance(_A , _A ) snake_case_ : int = calculate_rouge_path( data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=_A ) assert isinstance(_A , _A )
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''], '''tokenization_roc_bert''': ['''RoCBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoCBertForCausalLM''', '''RoCBertForMaskedLM''', '''RoCBertForMultipleChoice''', '''RoCBertForPreTraining''', '''RoCBertForQuestionAnswering''', '''RoCBertForSequenceClassification''', '''RoCBertForTokenClassification''', '''RoCBertLayer''', '''RoCBertModel''', '''RoCBertPreTrainedModel''', '''load_tf_weights_in_roc_bert''', ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = ["""image_processor""", """tokenizer"""] _A : str = """ChineseCLIPImageProcessor""" _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase__ , ) snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ , lowercase__ ) snake_case_ : Union[str, Any] = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.tokenizer.model_input_names snake_case_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , ) return self.image_processor_class
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a_ = logging.get_logger(__name__) a_ = {"vocab_file": "vocab.txt"} a_ = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } a_ = { "facebook/esm2_t6_8M_UR50D": 1024, "facebook/esm2_t12_35M_UR50D": 1024, } def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ , """r""" ) as f: snake_case_ : int = f.read().splitlines() return [l.strip() for l in lines] class __lowercase ( UpperCAmelCase_): """simple docstring""" _A : str = VOCAB_FILES_NAMES _A : int = PRETRAINED_VOCAB_FILES_MAP _A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : List[str] = ['input_ids', 'attention_mask'] def __init__(self , lowercase__ , lowercase__="<unk>" , lowercase__="<cls>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__="<eos>" , **lowercase__ , ): super().__init__(**_lowercase ) snake_case_ : Dict = load_vocab_file(_lowercase ) snake_case_ : Tuple = dict(enumerate(self.all_tokens ) ) snake_case_ : Dict = {tok: ind for ind, tok in enumerate(self.all_tokens )} snake_case_ : Optional[Any] = unk_token snake_case_ : Dict = cls_token snake_case_ : List[Any] = pad_token snake_case_ : List[Any] = mask_token snake_case_ : Dict = eos_token snake_case_ : Dict = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __UpperCamelCase (self , lowercase__ ): return self._id_to_token.get(_lowercase , self.unk_token ) def __UpperCamelCase (self , lowercase__ ): return self._token_to_id.get(_lowercase , self._token_to_id.get(self.unk_token ) ) def __UpperCamelCase (self , lowercase__ , **lowercase__ ): return text.split() def __UpperCamelCase (self , lowercase__=False ): return len(self._id_to_token ) def __UpperCamelCase (self ): return {token: i for i, token in enumerate(self.all_tokens )} def __UpperCamelCase (self , lowercase__ ): return self._token_to_id.get(_lowercase , self._token_to_id.get(self.unk_token ) ) def __UpperCamelCase (self , lowercase__ ): return self._id_to_token.get(_lowercase , self.unk_token ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : Union[str, Any] = [self.cls_token_id] snake_case_ : Union[str, Any] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError("""Cannot tokenize multiple sequences when EOS token is not set!""" ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] snake_case_ : Optional[int] = [1] + ([0] * len(_lowercase )) + [1] if token_ids_a is not None: mask += [0] * len(_lowercase ) + [1] return mask def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : Any = os.path.join(_lowercase , 
(filename_prefix + """-""" if filename_prefix else """""") + """vocab.txt""" ) with open(_lowercase , """w""" ) as f: f.write("""\n""".join(self.all_tokens ) ) return (vocab_file,) @property def __UpperCamelCase (self ): return self.get_vocab_size(with_added_tokens=_lowercase ) def __UpperCamelCase (self , lowercase__ , lowercase__ = False ): return super()._add_tokens(_lowercase , special_tokens=_lowercase )
"""simple docstring""" import argparse import copy def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case_ : int = [] _list.append([line.split()[1], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case_ : str = [] _list.append([line.split()[0], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ ) as f: snake_case_ : Optional[Any] = f.read(1 ) snake_case_ : Union[str, Any] = start_node snake_case_ : Dict = [] snake_case_ : Union[str, Any] = start_node snake_case_ : Tuple = 0 while visiting not in first_solution: snake_case_ : int = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution: snake_case_ : Union[str, Any] = k[1] snake_case_ : Any = k[0] first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = best_node first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case_ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = [] for n in solution[1:-1]: snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ ) for kn in solution[1:-1]: snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ ) if n == kn: continue snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = kn snake_case_ : Dict = n snake_case_ : Optional[int] = 0 for k in _tmp[:-1]: snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case_ : Dict = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = 1 snake_case_ : List[Any] = first_solution snake_case_ : List[Any] = [] snake_case_ : Optional[Any] = distance_of_first_solution snake_case_ : Dict = solution while count <= iters: snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = 0 snake_case_ : List[Any] = neighborhood[index_of_best_solution] snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 snake_case_ : List[str] = False while 
not found: snake_case_ : Tuple = 0 while i < len(SCREAMING_SNAKE_CASE__ ): if best_solution[i] != solution[i]: snake_case_ : Optional[Any] = best_solution[i] snake_case_ : int = solution[i] break snake_case_ : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case_ : Tuple = True snake_case_ : Dict = best_solution[:-1] snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case_ : Tuple = cost snake_case_ : Union[str, Any] = solution else: snake_case_ : str = index_of_best_solution + 1 snake_case_ : Tuple = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE__ ) >= size: tabu_list.pop(0 ) snake_case_ : List[str] = count + 1 return best_solution_ever, best_cost def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): """simple docstring""" snake_case_ : Tuple = generate_neighbours(args.File ) snake_case_ , snake_case_ : Optional[Any] = generate_first_solution( args.File , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : Dict = tabu_search( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
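# Programmatic sketch of the tabu-search driver, using the names the call
# sites in main() expect. The data file is hypothetical: one
# "node_a node_b distance" edge per line. Not self-contained -- it relies on
# the functions defined in the script above.
neighbours = generate_neighbours("tabu_data.txt")
first_solution, first_distance = generate_first_solution("tabu_data.txt", neighbours)
best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 100, 5)
print(best_solution, best_cost)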
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Any = inspect.getfile(accelerate.test_utils ) snake_case_ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] ) snake_case_ : Union[str, Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] ) snake_case_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] ) @require_multi_gpu def __UpperCamelCase (self ): print(f'Found {torch.cuda.device_count()} devices.' ) snake_case_ : int = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase (self ): print(f'Found {torch.cuda.device_count()} devices.' ) snake_case_ : List[str] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path] print(f'Command: {cmd}' ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase (self ): snake_case_ : Optional[int] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) @require_multi_gpu def __UpperCamelCase (self ): print(f'Found {torch.cuda.device_count()} devices, using 2 devices only' ) snake_case_ : List[str] = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ): execute_subprocess_async(lowercase__ , env=os.environ.copy() ) if __name__ == "__main__": a_ = Accelerator() a_ = (accelerator.state.process_index + 2, 10) a_ = torch.randint(0, 10, shape).to(accelerator.device) a_ = '''''' a_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." a_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." a_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings a_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """rag""" _A : Optional[Any] = True def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" snake_case_ : List[Any] = kwargs.pop("""question_encoder""" ) snake_case_ : Tuple = question_encoder_config.pop("""model_type""" ) snake_case_ : List[str] = kwargs.pop("""generator""" ) snake_case_ : List[str] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : int = reduce_loss snake_case_ : Optional[int] = label_smoothing snake_case_ : Dict = exclude_bos_score snake_case_ : Union[str, Any] = do_marginalize snake_case_ : Union[str, Any] = title_sep snake_case_ : int = doc_sep snake_case_ : int = n_docs snake_case_ : List[str] = max_combined_length snake_case_ : Tuple = dataset snake_case_ : int = dataset_split snake_case_ : str = index_name snake_case_ : List[str] = retrieval_vector_size snake_case_ : Dict = retrieval_batch_size snake_case_ : str = passages_path snake_case_ : Union[str, Any] = index_path snake_case_ : Tuple = use_dummy_dataset snake_case_ : Dict = output_retrieved snake_case_ : str = do_deduplication snake_case_ : Any = use_cache if self.forced_eos_token_id is None: snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ ) @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.question_encoder.to_dict() snake_case_ : Dict = self.generator.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
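# Illustrative construction of a RagConfig from sub-configs, mirroring the
# classmethod above: DPR encodes questions, BART generates answers. Both
# sub-configs are passed as dicts, as the assert in __init__ requires.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig(
    question_encoder=DPRConfig().to_dict(),
    generator=BartConfig().to_dict(),
    n_docs=5,
)
print(rag_config.generator.model_type)  # "bart"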
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[Any] = 42 _A : List[str] = 42 class __lowercase ( nn.Module): """simple docstring""" _A : int = 42 _A : List[Any] = (16, 32, 96, 256) _A : Optional[int] = jnp.floataa def __UpperCamelCase (self ): snake_case_ : Optional[Any] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) snake_case_ : int = [] for i in range(len(self.block_out_channels ) - 1 ): snake_case_ : Union[str, Any] = self.block_out_channels[i] snake_case_ : Optional[int] = self.block_out_channels[i + 1] snake_case_ : Optional[int] = nn.Conv( _lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_lowerCAmelCase ) snake_case_ : Optional[int] = nn.Conv( _lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_lowerCAmelCase ) snake_case_ : Optional[Any] = blocks snake_case_ : Any = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__(self , lowercase__ ): snake_case_ : Union[str, Any] = self.conv_in(_lowerCAmelCase ) snake_case_ : Tuple = nn.silu(_lowerCAmelCase ) for block in self.blocks: snake_case_ : str = block(_lowerCAmelCase ) snake_case_ : int = nn.silu(_lowerCAmelCase ) snake_case_ : str = self.conv_out(_lowerCAmelCase ) return embedding @flax_register_to_config class __lowercase ( nn.Module , _UpperCAmelCase , _UpperCAmelCase): """simple docstring""" _A : Dict = 32 _A : List[str] = 4 _A : Optional[int] = ( """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""", """DownBlock2D""", ) _A : Optional[Any] = False _A : Union[str, Any] = (320, 640, 1280, 1280) _A : str = 2 _A : Optional[int] = 8 _A : str = None _A : Any = 1280 _A : List[Any] = 0.0 _A : Optional[int] = False _A : str = jnp.floataa _A : List[Any] = True _A : Optional[Any] = 0 _A : Union[str, Any] = """rgb""" _A : str = (16, 32, 96, 256) def __UpperCamelCase (self , lowercase__ ): # init input tensors snake_case_ : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size) snake_case_ : Any = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa ) snake_case_ : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) snake_case_ : Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case_ : int = (1, 3, self.sample_size * 8, self.sample_size * 8) snake_case_ : Dict = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa ) snake_case_ , snake_case_ : str = jax.random.split(_lowerCAmelCase ) snake_case_ : Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng} return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"] def __UpperCamelCase (self ): snake_case_ : Dict = self.block_out_channels snake_case_ : str = block_out_channels[0] * 4 # If 
`num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case_ : int = self.num_attention_heads or self.attention_head_dim # input snake_case_ : int = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case_ : List[Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case_ : Tuple = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype ) snake_case_ : Optional[Any] = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) snake_case_ : Dict = self.only_cross_attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ : Tuple = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ : Optional[int] = (num_attention_heads,) * len(self.down_block_types ) # down snake_case_ : Dict = [] snake_case_ : Optional[int] = [] snake_case_ : Tuple = block_out_channels[0] snake_case_ : Tuple = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): snake_case_ : Union[str, Any] = output_channel snake_case_ : Union[str, Any] = block_out_channels[i] snake_case_ : List[str] = i == len(_lowerCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case_ : int = FlaxCrossAttnDownBlockaD( in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: snake_case_ : Union[str, Any] = FlaxDownBlockaD( in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCAmelCase ) for _ in range(self.layers_per_block ): snake_case_ : int = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) if not is_final_block: snake_case_ : Union[str, Any] = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) snake_case_ : int = down_blocks snake_case_ : int = controlnet_down_blocks # mid snake_case_ : Optional[Any] = block_out_channels[-1] snake_case_ : int = FlaxUNetMidBlockaDCrossAttn( in_channels=_lowerCAmelCase , 
dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) snake_case_ : List[str] = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1.0 , lowercase__ = True , lowercase__ = False , ): snake_case_ : List[Any] = self.controlnet_conditioning_channel_order if channel_order == "bgr": snake_case_ : Dict = jnp.flip(_lowerCAmelCase , axis=1 ) # 1. time if not isinstance(_lowerCAmelCase , jnp.ndarray ): snake_case_ : Tuple = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case_ : List[str] = timesteps.astype(dtype=jnp.floataa ) snake_case_ : Tuple = jnp.expand_dims(_lowerCAmelCase , 0 ) snake_case_ : Any = self.time_proj(_lowerCAmelCase ) snake_case_ : List[str] = self.time_embedding(_lowerCAmelCase ) # 2. pre-process snake_case_ : List[Any] = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) ) snake_case_ : str = self.conv_in(_lowerCAmelCase ) snake_case_ : List[str] = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) ) snake_case_ : Any = self.controlnet_cond_embedding(_lowerCAmelCase ) sample += controlnet_cond # 3. down snake_case_ : Any = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ , snake_case_ : int = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) else: snake_case_ , snake_case_ : Dict = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid snake_case_ : Tuple = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) # 5. contronet blocks snake_case_ : str = () for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ): snake_case_ : Union[str, Any] = controlnet_block(_lowerCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) snake_case_ : List[str] = controlnet_down_block_res_samples snake_case_ : int = self.controlnet_mid_block(_lowerCAmelCase ) # 6. scaling snake_case_ : Dict = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """upernet""" def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ): super().__init__(**lowercase__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): snake_case_ : Tuple = backbone_config.get("""model_type""" ) snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[Any] = config_class.from_dict(lowercase__ ) snake_case_ : List[Any] = backbone_config snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = initializer_range snake_case_ : str = pool_scales snake_case_ : Dict = use_auxiliary_head snake_case_ : str = auxiliary_loss_weight snake_case_ : List[str] = auxiliary_in_channels snake_case_ : Optional[Any] = auxiliary_channels snake_case_ : Any = auxiliary_num_convs snake_case_ : List[Any] = auxiliary_concat_input snake_case_ : List[str] = loss_ignore_index def __UpperCamelCase (self ): snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : Any = self.__class__.model_type return output
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json", "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json", "kssteven/ibert-roberta-large-mnli": ( "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json" ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = "ibert" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=1 , lowercase__=0 , lowercase__=2 , lowercase__="absolute" , lowercase__=False , lowercase__="none" , **lowercase__ , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) snake_case_ : List[Any] = vocab_size snake_case_ : Optional[Any] = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Any = num_attention_heads snake_case_ : List[str] = hidden_act snake_case_ : List[str] = intermediate_size snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : Dict = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Any = position_embedding_type snake_case_ : Tuple = quant_mode snake_case_ : Union[str, Any] = force_dequant class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"} else: snake_case_ : Optional[int] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ] )
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__=-1 ): # in NER datasets, the last column is usually reserved for NER label snake_case_ : Union[str, Any] = label_idx def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[str] = mode.value snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : Any = [] with open(lowercase__ , encoding="""utf-8""" ) as f: snake_case_ : str = [] snake_case_ : List[Any] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 snake_case_ : Optional[Any] = [] snake_case_ : int = [] else: snake_case_ : Optional[Any] = line.split(""" """ ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(lowercase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Dict = f.read().splitlines() if "O" not in labels: snake_case_ : List[Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Any = f.read().splitlines() if "O" not in labels: snake_case_ : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[Any] = mode.value snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : str = [] with open(lowercase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(lowercase__ ): snake_case_ : Tuple = [] snake_case_ : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , 
labels=lowercase__ ) ) guid_index += 1 return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = 0 for sentence in parse_incr(lowercase__ ): snake_case_ : int = preds_list[example_id] snake_case_ : Dict = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase__ ) example_id += 1 def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
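For orientation, a minimal sketch (illustrative, not from the source) of the whitespace-separated CoNLL-style input the NER reader above parses: one token per line with the label in the last column, and a blank line ending the sentence.

# Hypothetical three-token sentence in the column format assumed by the reader above.
sample = "EU B-ORG\nrejects O\nGerman B-MISC\n\n"
words, labels = [], []
for line in sample.splitlines():
    if line == "":
        break  # a blank line ends the sentence
    splits = line.split(" ")
    words.append(splits[0])
    labels.append(splits[-1])  # label_idx=-1, the NER default above
print(words, labels)  # ['EU', 'rejects', 'German'] ['B-ORG', 'O', 'B-MISC']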
48
0
"""simple docstring""" import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """AutoTokenizer""" _A : List[str] = ["""tokenizer"""] _A : str = { """semantic_prompt""": 1, """coarse_prompt""": 2, """fine_prompt""": 2, } def __init__(self , lowercase__ , lowercase__=None ): super().__init__(__A ) snake_case_ : Optional[Any] = speaker_embeddings @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__="speaker_embeddings_path.json" , **lowercase__ ): if speaker_embeddings_dict_path is not None: snake_case_ : int = get_file_from_repo( __A , __A , subfolder=kwargs.pop("""subfolder""" , __A ) , cache_dir=kwargs.pop("""cache_dir""" , __A ) , force_download=kwargs.pop("""force_download""" , __A ) , proxies=kwargs.pop("""proxies""" , __A ) , resume_download=kwargs.pop("""resume_download""" , __A ) , local_files_only=kwargs.pop("""local_files_only""" , __A ) , use_auth_token=kwargs.pop("""use_auth_token""" , __A ) , revision=kwargs.pop("""revision""" , __A ) , ) if speaker_embeddings_path is None: logger.warning( f'`{os.path.join(__A , __A )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) snake_case_ : Dict = None else: with open(__A ) as speaker_embeddings_json: snake_case_ : Union[str, Any] = json.load(__A ) else: snake_case_ : Optional[int] = None snake_case_ : Any = AutoTokenizer.from_pretrained(__A , **__A ) return cls(tokenizer=__A , speaker_embeddings=__A ) def __UpperCamelCase (self , lowercase__ , lowercase__="speaker_embeddings_path.json" , lowercase__="speaker_embeddings" , lowercase__ = False , **lowercase__ , ): if self.speaker_embeddings is not None: os.makedirs(os.path.join(__A , __A , """v2""" ) , exist_ok=__A ) snake_case_ : Dict = {} snake_case_ : Any = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": snake_case_ : Union[str, Any] = self._load_voice_preset(__A ) snake_case_ : List[str] = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict["""repo_or_path"""] , __A , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=__A , ) snake_case_ : List[str] = os.path.join(__A , f'{prompt_key}_{key}.npy' ) snake_case_ : Any = tmp_dict with open(os.path.join(__A , __A ) , """w""" ) as fp: json.dump(__A , __A ) super().save_pretrained(__A , __A , **__A ) def __UpperCamelCase (self , lowercase__ = None , **lowercase__ ): snake_case_ : Optional[int] = self.speaker_embeddings[voice_preset] snake_case_ : int = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) snake_case_ : List[str] = get_file_from_repo( self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __A ) , cache_dir=kwargs.pop("""cache_dir""" , __A ) , force_download=kwargs.pop("""force_download""" , __A ) , proxies=kwargs.pop("""proxies""" , __A ) , resume_download=kwargs.pop("""resume_download""" , __A ) , local_files_only=kwargs.pop("""local_files_only""" , __A ) , use_auth_token=kwargs.pop("""use_auth_token""" , __A ) , revision=kwargs.pop("""revision""" , __A ) , ) if path is None: raise ValueError( f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) snake_case_ : int = np.load(__A ) return voice_preset_dict def __UpperCamelCase (self , lowercase__ = None ): for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__(self , lowercase__=None , lowercase__=None , lowercase__="pt" , lowercase__=2_56 , lowercase__=False , lowercase__=True , lowercase__=False , **lowercase__ , ): if voice_preset is not None and not isinstance(__A , __A ): if ( isinstance(__A , __A ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): snake_case_ : Optional[Any] = self._load_voice_preset(__A ) else: if isinstance(__A , __A ) and not voice_preset.endswith(""".npz""" ): snake_case_ : List[str] = voice_preset + ".npz" snake_case_ : List[Any] = np.load(__A ) if voice_preset is not None: self._validate_voice_preset_dict(__A , **__A ) snake_case_ : List[str] = BatchFeature(data=__A , tensor_type=__A ) snake_case_ : Dict = self.tokenizer( __A , return_tensors=__A , padding="""max_length""" , max_length=__A , return_attention_mask=__A , return_token_type_ids=__A , add_special_tokens=__A , **__A , ) if voice_preset is not None: snake_case_ : Any = voice_preset return encoded_text
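As a rough illustration of the shape contract enforced by `_validate_voice_preset_dict` above, here is a hypothetical preset (all values invented) that would pass: `semantic_prompt` is 1-D while the coarse and fine prompts are 2-D, matching the `preset_shape` mapping.

import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(16, dtype=np.int64),     # must be 1-D
    "coarse_prompt": np.zeros((2, 16), dtype=np.int64),  # must be 2-D
    "fine_prompt": np.zeros((8, 16), dtype=np.int64),    # must be 2-D
}
preset_shape = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}
for key, ndim in preset_shape.items():
    assert len(voice_preset[key].shape) == ndim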
711
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Union[str, Any] = num - 1 snake_case_ : List[str] = 0 while s % 2 == 0: snake_case_ : str = s // 2 t += 1 for _ in range(5 ): snake_case_ : List[Any] = random.randrange(2 , num - 1 ) snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if v != 1: snake_case_ : int = 0 while v != (num - 1): if i == t - 1: return False else: snake_case_ : str = i + 1 snake_case_ : int = (v**2) % num return True def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if num < 2: return False snake_case_ : Dict = [ 2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1, 7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1, 2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ): """simple docstring""" while True: snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(SCREAMING_SNAKE_CASE__ ): return num if __name__ == "__main__": a_ = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
48
0
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" if not nums: raise ValueError("""List is empty""" ) return sum(__lowerCAmelCase ) / len(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
712
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
0
"""simple docstring""" import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class __lowercase ( tf.keras.optimizers.schedules.LearningRateSchedule): """simple docstring""" def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1.0 , lowercase__ = None , ): super().__init__() snake_case_ : Any = initial_learning_rate snake_case_ : Dict = warmup_steps snake_case_ : Tuple = power snake_case_ : Dict = decay_schedule_fn snake_case_ : Union[str, Any] = name def __call__(self , lowercase__ ): with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. snake_case_ : Dict = tf.cast(lowercase__ , tf.floataa ) snake_case_ : List[str] = tf.cast(self.warmup_steps , tf.floataa ) snake_case_ : Any = global_step_float / warmup_steps_float snake_case_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase__ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase__ , ) def __UpperCamelCase (self ): return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] = 0.0 , SCREAMING_SNAKE_CASE__ : Tuple = 0.9 , SCREAMING_SNAKE_CASE__ : List[str] = 0.999 , SCREAMING_SNAKE_CASE__ : str = 1E-8 , SCREAMING_SNAKE_CASE__ : Union[str, Any] = None , SCREAMING_SNAKE_CASE__ : Any = None , SCREAMING_SNAKE_CASE__ : Any = 0.0 , SCREAMING_SNAKE_CASE__ : int = 1.0 , SCREAMING_SNAKE_CASE__ : List[Any] = None , ): """simple docstring""" snake_case_ : Tuple = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , ) if num_warmup_steps: snake_case_ : str = WarmUp( initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , ) if weight_decay_rate > 0.0: snake_case_ : int = AdamWeightDecay( learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=__UpperCAmelCase , ) else: snake_case_ : Any = tf.keras.optimizers.Adam( learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class __lowercase ( _snake_case): """simple docstring""" def __init__(self , lowercase__ = 0.001 , lowercase__ = 0.9 , lowercase__ = 0.999 , lowercase__ = 1e-7 , lowercase__ = False , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = None , lowercase__ = "AdamWeightDecay" , **lowercase__ , ): super().__init__(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ) snake_case_ : Union[str, Any] = weight_decay_rate snake_case_ : List[str] = include_in_weight_decay snake_case_ : List[Any] = exclude_from_weight_decay @classmethod def __UpperCamelCase (cls , lowercase__ ): snake_case_ : List[str] = {"""WarmUp""": WarmUp} return super(lowercase__ , cls ).from_config(lowercase__ , custom_objects=lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): super(lowercase__ , self )._prepare_local(lowercase__ , lowercase__ , lowercase__ ) snake_case_ : List[Any] = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Tuple = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def __UpperCamelCase (self , lowercase__ , lowercase__=None , **lowercase__ ): snake_case_ , snake_case_ : Tuple = list(zip(*lowercase__ ) ) return super(lowercase__ , self ).apply_gradients(zip(lowercase__ , lowercase__ ) , name=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): if apply_state is None: return self._decayed_lr_t[var_dtype], {} snake_case_ : List[Any] = apply_state or {} snake_case_ : str = apply_state.get((var_device, var_dtype) ) if coefficients is None: snake_case_ : int = self._fallback_apply_state(lowercase__ , lowercase__ ) snake_case_ : Tuple = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=None ): snake_case_ , snake_case_ : List[str] = self._get_lr(var.device , var.dtype.base_dtype , lowercase__ ) snake_case_ : Tuple = self._decay_weights_op(lowercase__ , lowercase__ , lowercase__ ) with tf.control_dependencies([decay] ): return super(lowercase__ , self )._resource_apply_dense(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ): snake_case_ , snake_case_ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase__ ) snake_case_ : str = self._decay_weights_op(lowercase__ , lowercase__ , lowercase__ ) with tf.control_dependencies([decay] ): return super(lowercase__ , self )._resource_apply_sparse(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Tuple = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def __UpperCamelCase (self , lowercase__ ): if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase__ , lowercase__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase__ , lowercase__ ) is not None: return False return True class __lowercase ( _snake_case): """simple docstring""" def __init__(self ): snake_case_ : str = [] 
snake_case_ : str = None @property def __UpperCamelCase (self ): if self._accum_steps is None: snake_case_ : str = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def __UpperCamelCase (self ): if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__(self , lowercase__ ): if not self._gradients: snake_case_ : Dict = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase__ ) , trainable=lowercase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase__ ) != len(self._gradients ): raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(lowercase__ )}' ) for accum_gradient, gradient in zip(self._gradients , lowercase__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase__ ) self._accum_steps.assign_add(1 ) def __UpperCamelCase (self ): if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase__ ) )
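A minimal pure-Python sketch (no TensorFlow, all values assumed) of the warmup rule implemented by `WarmUp.__call__` above: below `warmup_steps` the learning rate scales as `(step / warmup_steps) ** power`, after which the decay schedule takes over.

def warmup_lr(step, initial_learning_rate=1e-3, warmup_steps=100, power=1.0):
    if step < warmup_steps:
        return initial_learning_rate * (step / warmup_steps) ** power
    return initial_learning_rate  # the real class calls decay_schedule_fn here

assert warmup_lr(50) == 5e-4   # halfway through warmup at power=1.0
assert warmup_lr(100) == 1e-3  # warmup complete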
713
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Dict = f'{sampling_rate}' snake_case_ : Any = """1""" snake_case_ : Any = """f32le""" snake_case_ : Optional[int] = [ """ffmpeg""", """-i""", """pipe:0""", """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] try: with subprocess.Popen(lowerCAmelCase__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: snake_case_ : str = ffmpeg_process.communicate(lowerCAmelCase__ ) except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error snake_case_ : str = output_stream[0] snake_case_ : List[Any] = np.frombuffer(lowerCAmelCase__ , np.floataa ) if audio.shape[0] == 0: raise ValueError("""Malformed soundfile""" ) return audio def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] = "f32le" , ): """simple docstring""" snake_case_ : int = f'{sampling_rate}' snake_case_ : str = """1""" if format_for_conversion == "s16le": snake_case_ : Union[str, Any] = 2 elif format_for_conversion == "f32le": snake_case_ : Dict = 4 else: raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' ) snake_case_ : Optional[int] = platform.system() if system == "Linux": snake_case_ : Dict = """alsa""" snake_case_ : List[str] = """default""" elif system == "Darwin": snake_case_ : str = """avfoundation""" snake_case_ : Tuple = """:0""" elif system == "Windows": snake_case_ : Union[str, Any] = """dshow""" snake_case_ : Tuple = """default""" snake_case_ : Dict = [ """ffmpeg""", """-f""", format_, """-i""", input_, """-ac""", ac, """-ar""", ar, """-f""", format_for_conversion, """-fflags""", """nobuffer""", """-hide_banner""", """-loglevel""", """quiet""", """pipe:1""", ] snake_case_ : Union[str, Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample snake_case_ : str = _ffmpeg_stream(lowerCAmelCase__ , lowerCAmelCase__ ) for item in iterator: yield item def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Any = "f32le" , ): """simple docstring""" if stream_chunk_s is not None: snake_case_ : Any = stream_chunk_s else: snake_case_ : Optional[Any] = chunk_length_s snake_case_ : List[Any] = ffmpeg_microphone(lowerCAmelCase__ , lowerCAmelCase__ , format_for_conversion=lowerCAmelCase__ ) if format_for_conversion == "s16le": snake_case_ : Optional[Any] = np.intaa snake_case_ : Union[str, Any] = 2 elif format_for_conversion == "f32le": snake_case_ : List[str] = np.floataa snake_case_ : Any = 4 else: raise ValueError(f'Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`' ) if stride_length_s is None: snake_case_ : Dict = chunk_length_s / 6 snake_case_ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCAmelCase__ , (int, float) ): snake_case_ : List[str] = [stride_length_s, stride_length_s] snake_case_ : int = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample snake_case_ : Dict = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample snake_case_ : int = datetime.datetime.now() snake_case_ : Union[str, Any] = datetime.timedelta(seconds=lowerCAmelCase__ ) for item in chunk_bytes_iter(lowerCAmelCase__ , lowerCAmelCase__ , stride=(stride_left, stride_right) , stream=lowerCAmelCase__ ): # Put everything back in numpy scale snake_case_ : Tuple = np.frombuffer(item["""raw"""] , dtype=lowerCAmelCase__ ) snake_case_ : Any = ( item["""stride"""][0] // size_of_sample, item["""stride"""][1] // size_of_sample, ) snake_case_ : Optional[Any] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 1_0 * delta: # We're late !! SKIP continue yield item def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple = False ): """simple docstring""" snake_case_ : Tuple = b"""""" snake_case_ , snake_case_ : Optional[int] = stride if stride_left + stride_right >= chunk_len: raise ValueError( f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' ) snake_case_ : Optional[int] = 0 for raw in iterator: acc += raw if stream and len(lowerCAmelCase__ ) < chunk_len: snake_case_ : List[str] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCAmelCase__ ) >= chunk_len: # We are flushing the accumulator snake_case_ : Dict = (_stride_left, stride_right) snake_case_ : Optional[int] = {"""raw""": acc[:chunk_len], """stride""": stride} if stream: snake_case_ : Optional[Any] = False yield item snake_case_ : List[Any] = stride_left snake_case_ : int = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCAmelCase__ ) > stride_left: snake_case_ : str = {"""raw""": acc, """stride""": (_stride_left, 0)} if stream: snake_case_ : Union[str, Any] = False yield item def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : str = 2**2_4 # 16Mo try: with subprocess.Popen(lowerCAmelCase__ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase__ ) as ffmpeg_process: while True: snake_case_ : Any = ffmpeg_process.stdout.read(lowerCAmelCase__ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
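The byte-count arithmetic that sizes each microphone chunk in the helpers above, worked with assumed values (16 kHz audio, 2-second chunks, `f32le` so four bytes per sample):

sampling_rate, chunk_length_s = 16_000, 2.0
size_of_sample = 4  # f32le: one float32 per sample
chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
assert chunk_len == 128_000  # 32_000 samples of 4 bytes each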
714
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('''fixtures/test_sentencepiece.model''') a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} a_ = '''>>zh<<''' a_ = '''Helsinki-NLP/''' if is_torch_available(): a_ = '''pt''' elif is_tf_available(): a_ = '''tf''' else: a_ = '''jax''' @require_sentencepiece class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = MarianTokenizer _A : List[str] = False _A : List[str] = True def __UpperCamelCase (self ): super().setUp() snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) snake_case_ : Any = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase (self , **lowercase__ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return ( "This is a test", "This is a test", ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """</s>""" snake_case_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def __UpperCamelCase (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) snake_case_ : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : List[str] = tok( ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.get_tokenizer() snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __UpperCamelCase (self ): # fmt: off snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) snake_case_ : Dict = """Tämä on testi""" snake_case_ : List[Any] = """This is a test""" snake_case_ : Optional[int] = [76, 7, 20_47, 2] snake_case_ : List[str] = [69, 12, 11, 9_40, 2] snake_case_ : Any = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
48
0
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Union[str, Any] = 0 snake_case_ : Optional[int] = len(__A ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: snake_case_ : str = i + 1 else: snake_case_ : str = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
715
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _A : ClassVar[Features] = Features({"""audio""": Audio()}) _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")}) _A : str = "audio" _A : str = "transcription" def __UpperCamelCase (self , lowercase__ ): if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase__ ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) snake_case_ : Optional[int] = copy.deepcopy(self ) snake_case_ : Tuple = self.input_schema.copy() snake_case_ : List[str] = features[self.audio_column] snake_case_ : Any = input_schema return task_template @property def __UpperCamelCase (self ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
48
0
"""simple docstring""" from datetime import datetime as dt import os from github import Github a_ = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[Any] = Github(os.environ["""GITHUB_TOKEN"""] ) snake_case_ : Union[str, Any] = g.get_repo("""huggingface/transformers""" ) snake_case_ : List[Any] = repo.get_issues(state="""open""" ) for issue in open_issues: snake_case_ : Optional[Any] = sorted([comment for comment in issue.get_comments()] , key=lambda SCREAMING_SNAKE_CASE__ : i.created_at , reverse=_lowerCamelCase ) snake_case_ : Tuple = comments[0] if len(_lowerCamelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state="""closed""" ) elif ( (dt.utcnow() - issue.updated_at).days > 2_3 and (dt.utcnow() - issue.created_at).days >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( """This issue has been automatically marked as stale because it has not had """ """recent activity. If you think this still needs to be addressed """ """please comment on this thread.\n\nPlease note that issues that do not follow the """ """[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """ """are likely to be ignored.""" ) if __name__ == "__main__": main()
716
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : int = ["""pixel_values"""] def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24} snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[str] = size snake_case_ : str = crop_pct snake_case_ : str = resample snake_case_ : Optional[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : int = do_rescale snake_case_ : Optional[int] = rescale_factor snake_case_ : str = do_normalize snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ): snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: snake_case_ : Dict = int(size["""height"""] / crop_pct ) else: snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) else: if "shortest_edge" in size: snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ ) elif "height" in size and "width" in size: snake_case_ : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): snake_case_ : int = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ): snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct snake_case_ : List[Any] = resample if resample is not None else self.resample snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : int = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : List[str] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] snake_case_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
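The `crop_pct` logic above inflates the resize target so that the later center crop of `size` retains `crop_pct` of the resized image; with the class defaults (shortest edge 224, crop_pct 0.9):

size, crop_pct = 224, 0.9
resize_target = int(size / crop_pct)
assert resize_target == 248  # resize shortest edge to 248, then center-crop to 224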
48
0
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Dict = args.log_outputs snake_case_ : Tuple = "_".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric snake_case_ : int = load_metric("""wer""" ) snake_case_ : List[Any] = load_metric("""cer""" ) # compute metrics snake_case_ : Tuple = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) snake_case_ : Dict = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results snake_case_ : List[Any] = f'WER: {wer_result}\nCER: {cer_result}' print(_lowerCamelCase ) with open(f'{dataset_id}_eval_results.txt' , """w""" ) as f: f.write(_lowerCamelCase ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case_ : Dict = f'log_{dataset_id}_predictions.txt' snake_case_ : List[str] = f'log_{dataset_id}_targets.txt' with open(_lowerCamelCase , """w""" ) as p, open(_lowerCamelCase , """w""" ) as t: # mapping function to write output def write_to_file(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): p.write(f'{i}' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(f'{i}' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(_lowerCamelCase , with_indices=_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple ): """simple docstring""" snake_case_ : Any = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case_ : Tuple = re.sub(_lowerCamelCase , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
snake_case_ : Union[str, Any] = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: snake_case_ : Optional[Any] = " ".join(text.split(_lowerCamelCase ) ) return text def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : List[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=_lowerCamelCase ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case_ : List[str] = feature_extractor.sampling_rate # resample audio snake_case_ : Tuple = dataset.cast_column("""audio""" , Audio(sampling_rate=_lowerCamelCase ) ) # load eval pipeline if args.device is None: snake_case_ : Optional[Any] = 0 if torch.cuda.is_available() else -1 snake_case_ : Any = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(SCREAMING_SNAKE_CASE__ : List[Any] ): snake_case_ : Any = asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case_ : Union[str, Any] = prediction["text"] snake_case_ : Dict = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples snake_case_ : int = dataset.map(_lowerCamelCase , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(_lowerCamelCase , _lowerCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers''' ) parser.add_argument( '''--dataset''', type=str, required=True, help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''', ) parser.add_argument( '''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice''' ) parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''') parser.add_argument( '''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.''' ) parser.add_argument( '''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.''' ) parser.add_argument( '''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.''' ) parser.add_argument( '''--device''', type=int, default=None, help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''', ) a_ = parser.parse_args() main(args)
717
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : str = ["""input_ids""", """attention_mask"""] _A : Tuple = MBartTokenizer _A : List[int] = [] _A : List[int] = [] def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , ) snake_case_ : Dict = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) snake_case_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX""" snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase (self ): return self._src_lang @src_lang.setter def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) snake_case_ : int = src_lang snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ): snake_case_ : List[str] = src_lang snake_case_ : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase (self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Tuple = [] snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return snake_case_ : List[str] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
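if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module). Assumes the
    # `transformers` package is installed and the hub checkpoint below is reachable.
    from transformers import MBartTokenizerFast

    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria")
    # Source sequences carry no prefix tokens and end with `</s> <src_lang_code>`,
    # matching the source-language special-token setup in the class above.
    print(tokenizer.convert_ids_to_tokens(batch["input_ids"])[-2:])  # ['</s>', 'en_XX']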
48
0
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __lowercase ( nn.Module): """simple docstring""" _A : List[Any] = 42 _A : List[Any] = 42 _A : Union[str, Any] = 0.0 _A : List[Any] = 1 _A : str = 1 _A : Tuple = True _A : str = False _A : Dict = False _A : List[str] = False _A : Optional[int] = jnp.floataa def __UpperCamelCase (self ): snake_case_ : Optional[int] = [] snake_case_ : Optional[int] = [] for i in range(self.num_layers ): snake_case_ : Optional[int] = self.in_channels if i == 0 else self.out_channels snake_case_ : List[Any] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCamelCase_ ) snake_case_ : Any = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCamelCase_ ) snake_case_ : List[Any] = resnets snake_case_ : List[str] = attentions if self.add_downsample: snake_case_ : Union[str, Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__=True ): snake_case_ : int = () for resnet, attn in zip(self.resnets , self.attentions ): snake_case_ : int = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) snake_case_ : str = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: snake_case_ : Union[str, Any] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class __lowercase ( nn.Module): """simple docstring""" _A : List[Any] = 42 _A : Any = 42 _A : Dict = 0.0 _A : Tuple = 1 _A : int = True _A : Optional[int] = jnp.floataa def __UpperCamelCase (self ): snake_case_ : str = [] for i in range(self.num_layers ): snake_case_ : Optional[Any] = self.in_channels if i == 0 else self.out_channels snake_case_ : Optional[int] = FlaxResnetBlockaD( in_channels=lowerCamelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCamelCase_ ) snake_case_ : str = resnets if self.add_downsample: snake_case_ : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , lowercase__ , lowercase__ , lowercase__=True ): snake_case_ : Optional[int] = () for resnet in self.resnets: snake_case_ : int = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) output_states += (hidden_states,) if self.add_downsample: snake_case_ : Optional[int] = self.downsamplers_a(lowerCamelCase_ ) output_states += (hidden_states,) return hidden_states, output_states class __lowercase ( nn.Module): """simple docstring""" _A : Any = 42 _A : int = 42 _A : Any = 42 _A : str = 0.0 _A : int = 1 _A : str = 1 _A : Tuple = True _A : Union[str, Any] = False _A : Tuple = False _A : Dict = False _A : List[str] = jnp.floataa def __UpperCamelCase (self ): snake_case_ : int = [] snake_case_ : List[Any] = [] for i in range(self.num_layers ): snake_case_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels snake_case_ : 
Dict = self.prev_output_channel if i == 0 else self.out_channels snake_case_ : Tuple = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCamelCase_ ) snake_case_ : List[str] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCamelCase_ ) snake_case_ : str = resnets snake_case_ : Tuple = attentions if self.add_upsample: snake_case_ : int = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states snake_case_ : Any = res_hidden_states_tuple[-1] snake_case_ : Any = res_hidden_states_tuple[:-1] snake_case_ : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) snake_case_ : Optional[Any] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) snake_case_ : List[str] = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) if self.add_upsample: snake_case_ : Optional[int] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class __lowercase ( nn.Module): """simple docstring""" _A : int = 42 _A : Optional[int] = 42 _A : Any = 42 _A : List[Any] = 0.0 _A : Any = 1 _A : Dict = True _A : List[str] = jnp.floataa def __UpperCamelCase (self ): snake_case_ : Any = [] for i in range(self.num_layers ): snake_case_ : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels snake_case_ : List[str] = self.prev_output_channel if i == 0 else self.out_channels snake_case_ : List[Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCamelCase_ ) snake_case_ : Union[str, Any] = resnets if self.add_upsample: snake_case_ : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__=True ): for resnet in self.resnets: # pop res hidden states snake_case_ : List[str] = res_hidden_states_tuple[-1] snake_case_ : Tuple = res_hidden_states_tuple[:-1] snake_case_ : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) snake_case_ : str = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) if self.add_upsample: snake_case_ : Union[str, Any] = self.upsamplers_a(lowerCamelCase_ ) return hidden_states class __lowercase ( nn.Module): """simple docstring""" _A : Any = 42 _A : List[Any] = 0.0 _A : Optional[int] = 1 _A : Optional[int] = 1 _A : Union[str, Any] = False _A : int = False _A : Tuple = jnp.floataa def __UpperCamelCase (self ): snake_case_ : Any = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] snake_case_ : int = [] for _ in range(self.num_layers ): snake_case_ : Tuple = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , 
use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(lowerCamelCase_ ) snake_case_ : Optional[Any] = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(lowerCamelCase_ ) snake_case_ : Optional[Any] = resnets snake_case_ : Any = attentions def __call__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__=True ): snake_case_ : int = self.resnets[0](lowerCamelCase_ , lowerCamelCase_ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): snake_case_ : str = attn(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) snake_case_ : Optional[int] = resnet(lowerCamelCase_ , lowerCamelCase_ , deterministic=lowerCamelCase_ ) return hidden_states
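if __name__ == "__main__":
    # Minimal self-contained sketch (illustrative, not part of the original module)
    # of the pattern the down blocks above implement: a stack of per-layer
    # transformations whose hidden states are collected as skip connections,
    # followed by a stride-2 downsampler. All names here are illustrative.
    import jax

    class TinyDownBlock(nn.Module):
        out_channels: int
        num_layers: int = 2
        dtype: jnp.dtype = jnp.float32

        @nn.compact
        def __call__(self, hidden_states):
            output_states = ()
            for _ in range(self.num_layers):
                hidden_states = nn.Conv(self.out_channels, (3, 3), padding="SAME", dtype=self.dtype)(hidden_states)
                hidden_states = nn.swish(hidden_states)
                output_states += (hidden_states,)  # collected as a skip connection
            # a stride-2 convolution plays the role of the downsampler
            hidden_states = nn.Conv(self.out_channels, (3, 3), strides=(2, 2), padding="SAME", dtype=self.dtype)(hidden_states)
            output_states += (hidden_states,)
            return hidden_states, output_states

    block = TinyDownBlock(out_channels=8)
    x = jnp.ones((1, 16, 16, 4))
    params = block.init(jax.random.PRNGKey(0), x)
    y, skips = block.apply(params, x)
    print(y.shape, len(skips))  # (1, 8, 8, 8) 3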
718
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class __lowercase : """simple docstring""" def __init__(self , lowercase__ ): snake_case_ : Union[str, Any] = data snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def __UpperCamelCase (lowercase__ , lowercase__ ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def __UpperCamelCase (self ): snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def __UpperCamelCase (self ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64 for i in range(16 , 80 ): snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __UpperCamelCase (self ): snake_case_ : List[Any] = self.padding() snake_case_ : Any = self.split_blocks() for block in self.blocks: snake_case_ : Any = self.expand_block(lowercase__ ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h for i in range(0 , 80 ): if 0 <= i < 20: snake_case_ : Optional[Any] = (b & c) | ((~b) & d) snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: snake_case_ : Union[str, Any] = b ^ c ^ d snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: snake_case_ : str = (b & c) | (b & d) | (c & d) snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: snake_case_ : Tuple = b ^ c ^ d snake_case_ : str = 0Xc_a_6_2_c_1_d_6 snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = ( self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(lowercase__ , 30 ), c, d, ) snake_case_ : Any = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = b"""Test String""" assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324 def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) snake_case_ : Optional[int] = parser.parse_args() snake_case_ : Optional[int] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: snake_case_ : List[str] = f.read() else: snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" ) print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
48
0
"""simple docstring""" import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Any = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_A , _A ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ , snake_case_ : Union[str, Any] = emb.weight.shape snake_case_ : Any = nn.Linear(_A , _A , bias=_A ) snake_case_ : Dict = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict=None ): """simple docstring""" snake_case_ : int = {} for old_key in state_dict.keys(): snake_case_ : Optional[Any] = old_key if "moe_layer.experts." in key: if expert_idx is not None: snake_case_ : List[Any] = key.replace("""moe_layer.experts.0""" , f'ffn.experts.expert_{expert_idx}' ) else: snake_case_ : Optional[Any] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: snake_case_ : Optional[Any] = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: snake_case_ : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: snake_case_ : Tuple = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." 
in key: snake_case_ : int = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: snake_case_ : Union[str, Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: snake_case_ : str = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) snake_case_ : Union[str, Any] = state_dict[old_key] return new_dict def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str = WEIGHTS_NAME ): """simple docstring""" snake_case_ : Any = [] snake_case_ : List[Any] = 0 os.makedirs(_A , exist_ok=_A ) for expert in range(_A ): snake_case_ : str = switch_checkpoint_path + f'-rank-{expert}.pt' if os.path.isfile(_A ): snake_case_ : Optional[int] = torch.load(_A )["""model"""] remove_ignore_keys_(_A ) snake_case_ : List[Any] = rename_fairseq_keys(_A , _A ) snake_case_ : Optional[Any] = os.path.join( _A , weights_name.replace(""".bin""" , f'-{len(_A )+1:05d}-of-???.bin' ) ) torch.save(_A , _A ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_A )[0]].dtype ) # Add the last block snake_case_ : Optional[Any] = os.path.join(_A , weights_name.replace(""".bin""" , f'-{len(_A )+1:05d}-of-???.bin' ) ) snake_case_ : List[str] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_A ) snake_case_ : List[Any] = rename_fairseq_keys(_A , _A ) snake_case_ : List[str] = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_A ) == 1: snake_case_ : Optional[Any] = os.path.join(_A , _A ) torch.save(_A , _A ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_A , _A ) # Otherwise, let's build the index snake_case_ : Dict = {} for idx, shard in enumerate(_A ): snake_case_ : str = weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-{len(_A ):05d}.bin' ) snake_case_ : Dict = os.path.join(_A , weights_name.replace(""".bin""" , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(_A , os.path.join(_A , _A ) ) for key in shard: snake_case_ : Dict = shard_file # Add the metadata snake_case_ : Optional[int] = {"""total_size""": total_size} snake_case_ : str = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_A , _A ) , """w""" , encoding="""utf-8""" ) as f: snake_case_ : Dict = json.dumps(_A , indent=2 , sort_keys=_A ) + """\n""" f.write(_A ) return metadata, index if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. 
Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) a_ = parser.parse_args() a_ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) a_ = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) a_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
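if __name__ == "__main__":
    # Illustrative sketch (not part of the original script) of the fairseq-to-HF
    # key-renaming pass performed above, applied to a hypothetical toy state dict.
    # The two replacements mirror the expert and router rules in the rename
    # function; the key names are examples only.
    toy_state = {
        "decoder.layers.3.moe_layer.experts.0.fc1.weight": None,
        "decoder.layers.3.moe_layer.gate.wg.weight": None,
    }
    renamed = {}
    for old_key in toy_state:
        key = old_key.replace("moe_layer.experts.0", "ffn.experts.expert_0")
        key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        renamed[key] = toy_state[old_key]
    print(sorted(renamed))
    # ['decoder.layers.3.ffn.experts.expert_0.fc1.weight',
    #  'decoder.layers.3.ffn.router.classifier.weight']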
719
"""simple docstring""" from manim import * class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : str = [mem.copy() for i in range(6 )] snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[Any] = Text("""CPU""" , font_size=24 ) snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase__ ) snake_case_ : List[Any] = [mem.copy() for i in range(4 )] snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = Text("""GPU""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase__ ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Dict = Text("""Model""" , font_size=24 ) snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) model.move_to([3, -1.0, 0] ) self.add(lowercase__ ) snake_case_ : Dict = [] for i, rect in enumerate(lowercase__ ): rect.set_stroke(lowercase__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 ) self.add(lowercase__ ) cpu_targs.append(lowercase__ ) snake_case_ : List[str] = [mem.copy() for i in range(6 )] snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) snake_case_ : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case_ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase__ , lowercase__ ) snake_case_ : List[Any] = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) snake_case_ : List[Any] = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase__ ) , Write(lowercase__ ) ) self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) ) snake_case_ : Optional[int] = [] snake_case_ : List[str] = [] for i, rect in enumerate(lowercase__ ): snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 ) target.move_to(lowercase__ ) first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) ) snake_case_ : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) ) self.play(*lowercase__ ) self.play(*lowercase__ ) self.wait()
48
0
"""simple docstring""" import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __lowercase ( _a , unittest.TestCase): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class __lowercase ( unittest.TestCase): """simple docstring""" @property def __UpperCamelCase (self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCamelCase (self ): snake_case_ : str = ort.SessionOptions() snake_case_ : Any = False return options def __UpperCamelCase (self ): snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) snake_case_ : Dict = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case_ ) snake_case_ : Dict = """A red cat sitting on a park bench""" snake_case_ : List[str] = np.random.RandomState(0 ) snake_case_ : Optional[int] = pipe( prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case_ , output_type="""np""" , ) snake_case_ : Optional[int] = output.images snake_case_ : List[Any] = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) snake_case_ : Tuple = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __UpperCamelCase (self ): snake_case_ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo.png""" ) snake_case_ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" ) snake_case_ : List[str] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" ) snake_case_ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained( """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=snake_case_ , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=snake_case_ ) snake_case_ : List[Any] = """A red cat sitting on a park bench""" snake_case_ : str = np.random.RandomState(0 ) snake_case_ : Optional[int] = pipe( prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case_ , output_type="""np""" , ) snake_case_ : Union[str, Any] = output.images snake_case_ : str = images[0, 2_55:2_58, 2_55:2_58, -1] assert images.shape == (1, 5_12, 5_12, 3) snake_case_ : 
Optional[int] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
720
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = 0 if start < end: snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = a[end] snake_case_ : Dict = a[pivot] snake_case_ : Any = temp snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ ) return count def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Tuple = 0 snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = a[end] snake_case_ : List[Any] = a[pivot] snake_case_ : Optional[Any] = temp snake_case_ : List[str] = start - 1 for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value snake_case_ : Any = new_pivot_index + 1 snake_case_ : Tuple = a[new_pivot_index] snake_case_ : Optional[int] = a[index] snake_case_ : Tuple = temp snake_case_ : Union[str, Any] = a[new_pivot_index + 1] snake_case_ : Union[str, Any] = a[end] snake_case_ : Union[str, Any] = temp return new_pivot_index + 1, count a_ = TemporaryFile() a_ = 100 # 1000 elements are to be sorted a_ , a_ = 0, 1 # mean and standard deviation a_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a_ = np.load(outfile) a_ = len(M) - 1 a_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
48
0
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __lowercase ( SCREAMING_SNAKE_CASE__): """simple docstring""" def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , **lowercase__ , ): super().__init__( _lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , ) snake_case_ : Optional[Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths} snake_case_ : Optional[int] = Text( cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , **_lowercase , ) def __UpperCamelCase (self ): if self.streaming: snake_case_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: snake_case_ : Optional[int] = None snake_case_ : Optional[int] = None snake_case_ : Union[str, Any] = None snake_case_ : List[Any] = None self.builder.download_and_prepare( download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , ) snake_case_ : Optional[Any] = self.builder.as_dataset( split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory ) return dataset
721
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ): """simple docstring""" snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(SCREAMING_SNAKE_CASE__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ): if random.random() < probability: graph[i].append(SCREAMING_SNAKE_CASE__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(SCREAMING_SNAKE_CASE__ ) return graph def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return { i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ ) } if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" import math def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Optional[int] = len(snake_case__ ) snake_case_ : str = int(math.floor(math.sqrt(snake_case__ ) ) ) snake_case_ : Optional[Any] = 0 while arr[min(snake_case__ , snake_case__ ) - 1] < x: snake_case_ : Optional[Any] = step step += int(math.floor(math.sqrt(snake_case__ ) ) ) if prev >= n: return -1 while arr[prev] < x: snake_case_ : Optional[Any] = prev + 1 if prev == min(snake_case__ , snake_case__ ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] a_ = int(input('''Enter the number to be searched:\n''')) a_ = jump_search(arr, x) if res == -1: print('''Number not found!''') else: print(F'''Number {x} is at index {res}''')
700
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """dpr""" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ): super().__init__(pad_token_id=lowercase__ , **lowercase__ ) snake_case_ : List[Any] = vocab_size snake_case_ : List[str] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : int = hidden_act snake_case_ : Dict = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Union[str, Any] = projection_dim snake_case_ : str = position_embedding_type
48
0
"""simple docstring""" from math import factorial, pi def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] = 3_0 ): """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): raise ValueError("""maclaurin_sin() requires either an int or float for theta""" ) if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0: raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" ) snake_case_ : Union[str, Any] = float(__SCREAMING_SNAKE_CASE ) snake_case_ : Optional[int] = theta // (2 * pi) theta -= 2 * div * pi return sum( (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any = 3_0 ): """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ): raise ValueError("""maclaurin_cos() requires either an int or float for theta""" ) if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0: raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" ) snake_case_ : List[Any] = float(__SCREAMING_SNAKE_CASE ) snake_case_ : str = theta // (2 * pi) theta -= 2 * div * pi return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() print(maclaurin_sin(10)) print(maclaurin_sin(-10)) print(maclaurin_sin(10, 15)) print(maclaurin_sin(-10, 15)) print(maclaurin_cos(5)) print(maclaurin_cos(-5)) print(maclaurin_cos(10, 15)) print(maclaurin_cos(-10, 15))
701
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm a_ = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex a_ = 10 a_ = 256 def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS: return None snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ ) for token in set(SCREAMING_SNAKE_CASE__ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0} class __lowercase : """simple docstring""" def __init__(self , *, lowercase__ = 0.85 , ): snake_case_ : Tuple = duplication_jaccard_threshold snake_case_ : Optional[Any] = NUM_PERM snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) snake_case_ : List[Any] = defaultdict(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : int = self._index.query(lowercase__ ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(lowercase__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : str = [] for base, duplicates in self._duplicate_clusters.items(): snake_case_ : Optional[Any] = [base] + list(lowercase__ ) # reformat the cluster to be a list of dict snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(lowercase__ ) return duplicate_clusters def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.get_duplicate_clusters() with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ , snake_case_ : str = element snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ): """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ): """simple docstring""" snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ): di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) a_ = None def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = [] for elementa in cluster: snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: snake_case_ : Union[str, Any] = 1 extremes.append(SCREAMING_SNAKE_CASE__ ) return extremes def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" global _shared_dataset snake_case_ : str = dataset snake_case_ : int = [] snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ): extremes_list.append(SCREAMING_SNAKE_CASE__ ) return extremes_list def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ): """simple docstring""" snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} snake_case_ : str = {} snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for extremes in extremes_clusters: for element in extremes: snake_case_ : int = element snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() ) snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: snake_case_ : List[Any] = element["""base_index"""] in extreme_dict if element["is_extreme"]: snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""] print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) return ds_filter, duplicate_clusters
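if __name__ == "__main__":
    # Minimal sketch (illustrative, not part of the original module) of the
    # MinHash similarity estimate the deduplication above is built on, using the
    # same 256-permutation setting. The helper below is a toy stand-in and skips
    # the module's minimum-token-count check.
    from datasketch import MinHash

    def _toy_min_hash(tokens):
        m = MinHash(num_perm=256)
        for token in set(tokens):
            m.update(token.encode())
        return m

    a = _toy_min_hash(["def", "add", "a", "b", "return", "a", "+", "b"])
    b = _toy_min_hash(["def", "add", "x", "y", "return", "x", "+", "y"])
    print(a.jaccard(b))  # estimated Jaccard similarity of the two token sets, in [0, 1]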
48
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" for attribute in key.split(""".""" ): snake_case_ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if weight_type is not None: snake_case_ : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape else: snake_case_ : Dict = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ : Optional[Any] = value elif weight_type == "weight_g": snake_case_ : Optional[Any] = value elif weight_type == "weight_v": snake_case_ : Optional[int] = value elif weight_type == "bias": snake_case_ : str = value else: snake_case_ : str = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' 
) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : List[Any] = [] snake_case_ : List[Any] = fairseq_model.state_dict() snake_case_ : Tuple = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case_ : Tuple = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ : int = True else: for key, mapped_key in MAPPING.items(): snake_case_ : List[str] = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ : List[str] = True if "*" in mapped_key: snake_case_ : List[str] = name.split(SCREAMING_SNAKE_CASE__ )[0].split(""".""" )[-2] snake_case_ : Tuple = mapped_key.replace("""*""" , SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: snake_case_ : int = """weight_g""" elif "weight_v" in name: snake_case_ : Dict = """weight_v""" elif "weight" in name: snake_case_ : Dict = """weight""" elif "bias" in name: snake_case_ : Tuple = """bias""" else: snake_case_ : Tuple = None set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(f'Unused weights: {unused_weights}' ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = full_name.split("""conv_layers.""" )[-1] snake_case_ : List[Any] = name.split(""".""" ) snake_case_ : str = int(items[0] ) snake_case_ : List[Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case_ : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ : Dict = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) snake_case_ : int = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' 
) snake_case_ : Optional[Any] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : Optional[Any] = SEWConfig() if is_finetuned: snake_case_ : List[Any] = model.wav_encoder.wav_model.cfg else: snake_case_ : Dict = model.cfg snake_case_ : List[str] = fs_config.conv_bias snake_case_ : Tuple = eval(fs_config.conv_feature_layers ) snake_case_ : str = [x[0] for x in conv_layers] snake_case_ : Tuple = [x[1] for x in conv_layers] snake_case_ : List[str] = [x[2] for x in conv_layers] snake_case_ : Tuple = """gelu""" snake_case_ : Optional[int] = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" snake_case_ : Optional[Any] = 0.0 snake_case_ : Dict = fs_config.activation_fn.name snake_case_ : Optional[int] = fs_config.encoder_embed_dim snake_case_ : Optional[Any] = 0.02 snake_case_ : Optional[int] = fs_config.encoder_ffn_embed_dim snake_case_ : List[Any] = 1E-5 snake_case_ : int = fs_config.encoder_layerdrop snake_case_ : Union[str, Any] = fs_config.encoder_attention_heads snake_case_ : Union[str, Any] = fs_config.conv_pos_groups snake_case_ : List[Any] = fs_config.conv_pos snake_case_ : str = len(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[int] = fs_config.encoder_layers snake_case_ : List[Any] = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: snake_case_ : Optional[Any] = model.cfg snake_case_ : Union[str, Any] = fs_config.final_dropout snake_case_ : int = fs_config.layerdrop snake_case_ : str = fs_config.activation_dropout snake_case_ : Optional[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 snake_case_ : Optional[int] = fs_config.attention_dropout snake_case_ : Optional[int] = fs_config.dropout_input snake_case_ : Any = fs_config.dropout snake_case_ : List[Any] = fs_config.mask_channel_length snake_case_ : Tuple = fs_config.mask_channel_prob snake_case_ : Optional[Any] = fs_config.mask_length snake_case_ : Optional[int] = fs_config.mask_prob snake_case_ : Optional[int] = """Wav2Vec2FeatureExtractor""" snake_case_ : List[str] = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ): """simple docstring""" if is_finetuned: snake_case_ , snake_case_ , snake_case_ : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: snake_case_ : int = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: snake_case_ : int = convert_config(model[0] , SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[int] = model[0].eval() snake_case_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False snake_case_ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) if is_finetuned: if dict_path: snake_case_ : Tuple = 
Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ : Any = target_dict.pad_index snake_case_ : Optional[Any] = target_dict.bos_index snake_case_ : Union[str, Any] = target_dict.pad_index snake_case_ : Any = target_dict.bos_index snake_case_ : Any = target_dict.eos_index snake_case_ : Optional[int] = len(target_dict.symbols ) snake_case_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """vocab.json""" ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ ) snake_case_ : int = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=SCREAMING_SNAKE_CASE__ , ) snake_case_ : List[Any] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = SEWForCTC(SCREAMING_SNAKE_CASE__ ) else: snake_case_ : Dict = SEWModel(SCREAMING_SNAKE_CASE__ ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) a_ = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
702
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) a_ = logging.getLogger(__name__) if __name__ == "__main__": a_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=30522, type=int) a_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: a_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') a_ = Counter() for tk_ids in data: counter.update(tk_ids) a_ = [0] * args.vocab_size for k, v in counter.items(): a_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
48
0
"""Hash map with open addressing and linear probing."""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar

KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """Falsy sentinel that marks a bucket whose item was deleted."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to store the value in the bucket; return False if another key occupies it."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ", ".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
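if __name__ == "__main__":
    # Usage demo (illustrative): the class implements the MutableMapping
    # protocol, so membership tests and iteration come for free.
    hm = HashMap()
    for i in range(10):
        hm[f"key_{i}"] = i
    del hm["key_0"]
    print(len(hm), "key_0" in hm, hm["key_9"])  # 9 False 9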
703
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Optional[Any] = tmp_path / """cache""" snake_case_ : Optional[int] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : int = {"""text""": """string"""} snake_case_ : Any = features.copy() if features else default_expected_features snake_case_ : List[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : Union[str, Any] = tmp_path / """cache""" snake_case_ : Optional[Any] = {"""text""": """string"""} snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = text_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : str = [text_path] snake_case_ : List[str] = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , 
SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: snake_case_ : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : int = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : int = features.copy() if features else default_expected_features snake_case_ : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if split: snake_case_ : Union[str, Any] = {split: text_path} else: snake_case_ : Union[str, Any] = """train""" snake_case_ : int = {"""train""": text_path, """test""": text_path} snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : Tuple = {"""text""": """string"""} snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
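# The tests above drive datasets' internal TextDatasetReader directly; the
# same behavior is reached through the public API (the file path below is
# illustrative):
from datasets import load_dataset

text_ds = load_dataset("text", data_files="my_corpus.txt", split="train")
print(text_ds.column_names)  # ['text'] -- one example per line of the file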
48
0
"""simple docstring""" import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class __lowercase ( lowercase__): """simple docstring""" def __init__(self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__( lowercase__ , split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , num_proc=lowercase__ , **lowercase__ , ) snake_case_ : int = field snake_case_ : int = path_or_paths if isinstance(lowercase__ , lowercase__ ) else {self.split: path_or_paths} snake_case_ : Tuple = Json( cache_dir=lowercase__ , data_files=lowercase__ , features=lowercase__ , field=lowercase__ , **lowercase__ , ) def __UpperCamelCase (self ): # Build iterable dataset if self.streaming: snake_case_ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: snake_case_ : Union[str, Any] = None snake_case_ : Union[str, Any] = None snake_case_ : str = None snake_case_ : List[Any] = None self.builder.download_and_prepare( download_config=lowercase__ , download_mode=lowercase__ , verification_mode=lowercase__ , base_path=lowercase__ , num_proc=self.num_proc , ) snake_case_ : List[Any] = self.builder.as_dataset( split=self.split , verification_mode=lowercase__ , in_memory=self.keep_in_memory ) return dataset class __lowercase : """simple docstring""" def __init__(self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , **lowercase__ , ): if num_proc is not None and num_proc <= 0: raise ValueError(f'num_proc {num_proc} must be an integer > 0.' ) snake_case_ : Tuple = dataset snake_case_ : List[str] = path_or_buf snake_case_ : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE snake_case_ : Any = num_proc snake_case_ : Optional[int] = "utf-8" snake_case_ : Any = to_json_kwargs def __UpperCamelCase (self ): snake_case_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase__ ) snake_case_ : List[str] = self.to_json_kwargs.pop("""orient""" , """records""" ) snake_case_ : Any = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False ) snake_case_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True ) snake_case_ : Any = self.to_json_kwargs.pop("""compression""" , lowercase__ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f'`datasets` currently does not support {compression} compression' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase__ ) as buffer: snake_case_ : Optional[int] = self._write(file_obj=lowercase__ , orient=lowercase__ , lines=lowercase__ , index=lowercase__ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( f'The compression parameter is not supported when writing to a buffer, but compression={compression}' """ was passed. 
Please provide a local path instead.""" ) snake_case_ : Any = self._write( file_obj=self.path_or_buf , orient=lowercase__ , lines=lowercase__ , index=lowercase__ , **self.to_json_kwargs ) return written def __UpperCamelCase (self , lowercase__ ): snake_case_ : Dict = args snake_case_ : str = query_table( table=self.dataset.data , key=slice(lowercase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) snake_case_ : Dict = batch.to_pandas().to_json( path_or_buf=lowercase__ , orient=lowercase__ , lines=lowercase__ , index=lowercase__ , **lowercase__ ) if not json_str.endswith("""\n""" ): json_str += "\n" return json_str.encode(self.encoding ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ , ): snake_case_ : List[str] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): snake_case_ : int = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(lowercase__ ) else: snake_case_ : Any = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase__ , lowercase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ): written += file_obj.write(lowercase__ ) return written
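# The JSON reader/writer above back `Dataset.from_json` / `Dataset.to_json`;
# a minimal public-API round trip (the output path is illustrative):
from datasets import Dataset

json_ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
json_ds.to_json("out.jsonl", lines=True)  # one JSON object per line
assert Dataset.from_json("out.jsonl").num_rows == 2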
704
"""simple docstring""" from copy import deepcopy class __lowercase : """simple docstring""" def __init__(self , lowercase__ = None , lowercase__ = None ): if arr is None and size is not None: snake_case_ : str = size snake_case_ : Optional[Any] = [0] * size elif arr is not None: self.init(lowercase__ ) else: raise ValueError("""Either arr or size must be specified""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[Any] = len(lowercase__ ) snake_case_ : int = deepcopy(lowercase__ ) for i in range(1 , self.size ): snake_case_ : Optional[Any] = self.next_(lowercase__ ) if j < self.size: self.tree[j] += self.tree[i] def __UpperCamelCase (self ): snake_case_ : Dict = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case_ : Optional[int] = self.next_(lowercase__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def __UpperCamelCase (lowercase__ ): return index + (index & (-index)) @staticmethod def __UpperCamelCase (lowercase__ ): return index - (index & (-index)) def __UpperCamelCase (self , lowercase__ , lowercase__ ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case_ : Tuple = self.next_(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): self.add(lowercase__ , value - self.get(lowercase__ ) ) def __UpperCamelCase (self , lowercase__ ): if right == 0: return 0 snake_case_ : List[str] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case_ : Optional[int] = self.prev(lowercase__ ) return result def __UpperCamelCase (self , lowercase__ , lowercase__ ): return self.prefix(lowercase__ ) - self.prefix(lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return self.query(lowercase__ , index + 1 ) def __UpperCamelCase (self , lowercase__ ): value -= self.tree[0] if value < 0: return -1 snake_case_ : Tuple = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case_ : Tuple = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" import math from typing import Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import randn_tensor from .scheduling_utils import SchedulerMixin class __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE): """simple docstring""" _A : str = 1 @register_to_config def __init__(self , lowercase__=20_00 , lowercase__=0.1 , lowercase__=20 , lowercase__=1e-3 ): snake_case_ : List[str] = None snake_case_ : Tuple = None snake_case_ : Any = None def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : int = torch.linspace(1 , self.config.sampling_eps , _a , device=_a ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ): if self.timesteps is None: raise ValueError( """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" ) # TODO(Patrick) better comments + non-PyTorch # postprocess model score snake_case_ : str = ( -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min ) snake_case_ : Union[str, Any] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) ) snake_case_ : Any = std.flatten() while len(std.shape ) < len(score.shape ): snake_case_ : List[str] = std.unsqueeze(-1 ) snake_case_ : Dict = -score / std # compute snake_case_ : Dict = -1.0 / len(self.timesteps ) snake_case_ : Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min) snake_case_ : Optional[int] = beta_t.flatten() while len(beta_t.shape ) < len(x.shape ): snake_case_ : str = beta_t.unsqueeze(-1 ) snake_case_ : int = -0.5 * beta_t * x snake_case_ : List[str] = torch.sqrt(_a ) snake_case_ : Any = drift - diffusion**2 * score snake_case_ : Optional[int] = x + drift * dt # add noise snake_case_ : Optional[Any] = randn_tensor(x.shape , layout=x.layout , generator=_a , device=x.device , dtype=x.dtype ) snake_case_ : Dict = x_mean + diffusion * math.sqrt(-dt ) * noise return x, x_mean def __len__(self ): return self.config.num_train_timesteps
705
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
48
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowercase ( __a , __a , unittest.TestCase): """simple docstring""" _A : Union[str, Any] = StableDiffusionXLImgaImgPipeline _A : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} _A : Optional[int] = PipelineTesterMixin.required_optional_params - {"""latents"""} _A : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS _A : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS _A : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase (self ): torch.manual_seed(0 ) snake_case_ : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=a_ , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) snake_case_ : List[str] = EulerDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) snake_case_ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) snake_case_ : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=32 , ) snake_case_ : Dict = CLIPTextModel(a_ ) snake_case_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=a_ ) snake_case_ : Tuple = CLIPTextModelWithProjection(a_ ) snake_case_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=a_ ) snake_case_ : Union[str, Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def __UpperCamelCase (self , lowercase__ , lowercase__=0 ): snake_case_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) snake_case_ : Optional[int] = image / 2 + 0.5 if str(a_ ).startswith("""mps""" ): snake_case_ : Optional[int] = torch.manual_seed(a_ ) else: snake_case_ : str = torch.Generator(device=a_ ).manual_seed(a_ ) snake_case_ : str = { """prompt""": """A painting of a 
squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def __UpperCamelCase (self ): snake_case_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Tuple = self.get_dummy_components() snake_case_ : List[Any] = StableDiffusionXLImgaImgPipeline(**a_ ) snake_case_ : Optional[Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) snake_case_ : str = self.get_dummy_inputs(a_ ) snake_case_ : List[Any] = sd_pipe(**a_ ).images snake_case_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : List[Any] = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __UpperCamelCase (self ): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def __UpperCamelCase (self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __UpperCamelCase (self ): pass def __UpperCamelCase (self ): snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Tuple = StableDiffusionXLImgaImgPipeline(**a_ ) snake_case_ : Union[str, Any] = sd_pipe.to(a_ ) snake_case_ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) # forward without prompt embeds snake_case_ : str = self.get_dummy_inputs(a_ ) snake_case_ : Any = 3 * ["""this is a negative prompt"""] snake_case_ : Optional[Any] = negative_prompt snake_case_ : Optional[Any] = 3 * [inputs["""prompt"""]] snake_case_ : Any = sd_pipe(**a_ ) snake_case_ : List[str] = output.images[0, -3:, -3:, -1] # forward with prompt embeds snake_case_ : Optional[int] = self.get_dummy_inputs(a_ ) snake_case_ : Any = 3 * ["""this is a negative prompt"""] snake_case_ : List[str] = 3 * [inputs.pop("""prompt""" )] ( snake_case_ ) : str = sd_pipe.encode_prompt(a_ , negative_prompt=a_ ) snake_case_ : Any = sd_pipe( **a_ , prompt_embeds=a_ , negative_prompt_embeds=a_ , pooled_prompt_embeds=a_ , negative_pooled_prompt_embeds=a_ , ) snake_case_ : Optional[Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase (self , lowercase__ , lowercase__="cpu" , lowercase__=torch.floataa , lowercase__=0 ): snake_case_ : str = torch.Generator(device=a_ ).manual_seed(a_ ) snake_case_ : Any = np.random.RandomState(a_ ).standard_normal((1, 4, 64, 64) ) snake_case_ : str = torch.from_numpy(a_ ).to(device=a_ , dtype=a_ ) snake_case_ : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def __UpperCamelCase (self ): snake_case_ : str = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) snake_case_ : int = self.get_inputs(a_ ) snake_case_ : Dict = pipe(**a_ ).images snake_case_ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_12, 5_12, 3) snake_case_ : Optional[int] = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] ) 
assert np.abs(image_slice - expected_slice ).max() < 7e-3
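# User-side counterpart of the pipeline under test; the checkpoint id, device,
# and input image are illustrative assumptions and require a GPU plus a model
# download.
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image

refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16
).to("cuda")
init_image = load_image("https://example.com/input.png")  # placeholder URL
result = refiner(prompt="a photo of a squirrel", image=init_image, strength=0.75).images[0]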
706
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = ["""image_processor""", """tokenizer"""] _A : str = """ChineseCLIPImageProcessor""" _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase__ , ) snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ , lowercase__ ) snake_case_ : Union[str, Any] = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.tokenizer.model_input_names snake_case_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , ) return self.image_processor_class
48
0
"""simple docstring""" from math import ceil def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1_0_0_1 ): """simple docstring""" snake_case_ : Dict = 1 for i in range(1 , int(ceil(n / 2.0 ) ) ): snake_case_ : Optional[Any] = 2 * i + 1 snake_case_ : Optional[Any] = 2 * i snake_case_ : Dict = total + 4 * odd**2 - 6 * even return total if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution()) else: try: a_ = int(sys.argv[1]) print(solution(n)) except ValueError: print('''Invalid entry - please enter a number''')
707
"""simple docstring""" import argparse import copy def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case_ : int = [] _list.append([line.split()[1], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case_ : str = [] _list.append([line.split()[0], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ ) as f: snake_case_ : Optional[Any] = f.read(1 ) snake_case_ : Union[str, Any] = start_node snake_case_ : Dict = [] snake_case_ : Union[str, Any] = start_node snake_case_ : Tuple = 0 while visiting not in first_solution: snake_case_ : int = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution: snake_case_ : Union[str, Any] = k[1] snake_case_ : Any = k[0] first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = best_node first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case_ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = [] for n in solution[1:-1]: snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ ) for kn in solution[1:-1]: snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ ) if n == kn: continue snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = kn snake_case_ : Dict = n snake_case_ : Optional[int] = 0 for k in _tmp[:-1]: snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case_ : Dict = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = 1 snake_case_ : List[Any] = first_solution snake_case_ : List[Any] = [] snake_case_ : Optional[Any] = distance_of_first_solution snake_case_ : Dict = solution while count <= iters: snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = 0 snake_case_ : List[Any] = neighborhood[index_of_best_solution] snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 snake_case_ : List[str] = False while 
not found: snake_case_ : Tuple = 0 while i < len(SCREAMING_SNAKE_CASE__ ): if best_solution[i] != solution[i]: snake_case_ : Optional[Any] = best_solution[i] snake_case_ : int = solution[i] break snake_case_ : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case_ : Tuple = True snake_case_ : Dict = best_solution[:-1] snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case_ : Tuple = cost snake_case_ : Union[str, Any] = solution else: snake_case_ : str = index_of_best_solution + 1 snake_case_ : Tuple = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE__ ) >= size: tabu_list.pop(0 ) snake_case_ : List[str] = count + 1 return best_solution_ever, best_cost def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): """simple docstring""" snake_case_ : Tuple = generate_neighbours(args.File ) snake_case_ , snake_case_ : Optional[Any] = generate_first_solution( args.File , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : Dict = tabu_search( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
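# Example invocation of the script above (file name and contents are
# illustrative). The input file lists one weighted edge per line in the form
# "node_a node_b distance", and its first character names the start node:
#
#   python tabu_search.py -f tsp_edges.txt -i 100 -s 5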
48
0
"""simple docstring""" import sys a_ = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : str = 1 for digit in s: product *= int(UpperCAmelCase__ ) return product def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str = N ): """simple docstring""" snake_case_ : Optional[int] = -sys.maxsize - 1 snake_case_ : str = n[:1_3] snake_case_ : Optional[int] = 1_3 while cur_index < len(UpperCAmelCase__ ) - 1_3: if int(n[cur_index] ) >= int(substr[0] ): snake_case_ : Optional[int] = substr[1:] + n[cur_index] cur_index += 1 else: snake_case_ : Optional[int] = max(UpperCAmelCase__ , str_eval(UpperCAmelCase__ ) ) snake_case_ : Dict = n[cur_index : cur_index + 1_3] cur_index += 1_3 return largest_product if __name__ == "__main__": print(F'''{solution() = }''')
708
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings a_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """rag""" _A : Optional[Any] = True def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" snake_case_ : List[Any] = kwargs.pop("""question_encoder""" ) snake_case_ : Tuple = question_encoder_config.pop("""model_type""" ) snake_case_ : List[str] = kwargs.pop("""generator""" ) snake_case_ : List[str] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : int = reduce_loss snake_case_ : Optional[int] = label_smoothing snake_case_ : Dict = exclude_bos_score snake_case_ : Union[str, Any] = do_marginalize snake_case_ : Union[str, Any] = title_sep snake_case_ : int = doc_sep snake_case_ : int = n_docs snake_case_ : List[str] = max_combined_length snake_case_ : Tuple = dataset snake_case_ : int = dataset_split snake_case_ : str = index_name snake_case_ : List[str] = retrieval_vector_size snake_case_ : Dict = retrieval_batch_size snake_case_ : str = passages_path snake_case_ : Union[str, Any] = index_path snake_case_ : Tuple = use_dummy_dataset snake_case_ : Dict = output_retrieved snake_case_ : str = do_deduplication snake_case_ : Any = use_cache if self.forced_eos_token_id is None: snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ ) @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.question_encoder.to_dict() snake_case_ : Dict = self.generator.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
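# Composing a RagConfig from its two sub-configs via the
# `from_question_encoder_generator_configs` classmethod (its name is
# anonymized in the dump above); the DPR/BART pairing is illustrative.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
print(rag_config.generator.model_type)  # 'bart'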
48
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''', } class __lowercase ( a__): """simple docstring""" _A : int = """git_vision_model""" def __init__(self , lowercase__=7_68 , lowercase__=30_72 , lowercase__=12 , lowercase__=12 , lowercase__=3 , lowercase__=2_24 , lowercase__=16 , lowercase__="quick_gelu" , lowercase__=1e-5 , lowercase__=0.0 , lowercase__=0.02 , **lowercase__ , ): super().__init__(**lowerCAmelCase__ ) snake_case_ : Tuple = hidden_size snake_case_ : List[Any] = intermediate_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : str = num_channels snake_case_ : Union[str, Any] = patch_size snake_case_ : List[Any] = image_size snake_case_ : Optional[int] = initializer_range snake_case_ : Tuple = attention_dropout snake_case_ : Union[str, Any] = layer_norm_eps snake_case_ : List[Any] = hidden_act @classmethod def __UpperCamelCase (cls , lowercase__ , **lowercase__ ): cls._set_token_in_kwargs(lowerCAmelCase__ ) snake_case_ : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": snake_case_ : Dict = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class __lowercase ( a__): """simple docstring""" _A : List[Any] = """git""" def __init__(self , lowercase__=None , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=6 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=10_24 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=False , lowercase__=1_01 , lowercase__=1_02 , lowercase__=None , **lowercase__ , ): super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) if vision_config is None: snake_case_ : List[str] = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) snake_case_ : Optional[int] = GitVisionConfig(**lowerCAmelCase__ ) snake_case_ : str = vocab_size snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Any = hidden_act snake_case_ : Tuple = intermediate_size snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : Tuple = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : int = position_embedding_type snake_case_ : Optional[Any] = use_cache snake_case_ : Any = tie_word_embeddings snake_case_ : List[Any] = num_image_with_embedding snake_case_ : int = bos_token_id snake_case_ : str = eos_token_id def __UpperCamelCase (self ): snake_case_ : int = copy.deepcopy(self.__dict__ ) snake_case_ : Optional[int] = self.vision_config.to_dict() snake_case_ : Optional[Any] = self.__class__.model_type return output
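# Default instantiation of the configs above, following the standard
# transformers pattern:
from transformers import GitConfig, GitVisionConfig

git_config = GitConfig()                    # builds a default GitVisionConfig too
print(git_config.vision_config.image_size)  # 224 by default
git_vision_config = GitVisionConfig(patch_size=32)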
709
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """upernet""" def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ): super().__init__(**lowercase__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): snake_case_ : Tuple = backbone_config.get("""model_type""" ) snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[Any] = config_class.from_dict(lowercase__ ) snake_case_ : List[Any] = backbone_config snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = initializer_range snake_case_ : str = pool_scales snake_case_ : Dict = use_auxiliary_head snake_case_ : str = auxiliary_loss_weight snake_case_ : List[str] = auxiliary_in_channels snake_case_ : Optional[Any] = auxiliary_channels snake_case_ : Any = auxiliary_num_convs snake_case_ : List[Any] = auxiliary_concat_input snake_case_ : List[str] = loss_ignore_index def __UpperCamelCase (self ): snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : Any = self.__class__.model_type return output
48
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=UpperCAmelCase__): """simple docstring""" _A : List[Any] = ["""torch""", """scipy"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch""", """scipy"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch""", """scipy"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch""", """scipy"""] )
710
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__=-1 ): # in NER datasets, the last column is usually reserved for NER label snake_case_ : Union[str, Any] = label_idx def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[str] = mode.value snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : Any = [] with open(lowercase__ , encoding="""utf-8""" ) as f: snake_case_ : str = [] snake_case_ : List[Any] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 snake_case_ : Optional[Any] = [] snake_case_ : int = [] else: snake_case_ : Optional[Any] = line.split(""" """ ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(lowercase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Dict = f.read().splitlines() if "O" not in labels: snake_case_ : List[Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Any = f.read().splitlines() if "O" not in labels: snake_case_ : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[Any] = mode.value snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : str = [] with open(lowercase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(lowercase__ ): snake_case_ : Tuple = [] snake_case_ : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , 
labels=lowercase__ ) ) guid_index += 1 return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = 0 for sentence in parse_incr(lowercase__ ): snake_case_ : int = preds_list[example_id] snake_case_ : Dict = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase__ ) example_id += 1 def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
48
0
"""simple docstring""" import os import jsonlines import numpy as np from tqdm import tqdm a_ = 2048 a_ = 4096 a_ = 42 a_ = os.environ.pop('''PROCESS_TRAIN''', '''false''') a_ = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4} def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" def choose_first(SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=False ): assert isinstance(_snake_case , _snake_case ) if len(_snake_case ) == 1: snake_case_ : List[Any] = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: snake_case_ : List[Any] = {k: [a[k]] for k in a} if len(a["""start_token"""] ) > 0: break return a snake_case_ : Any = {"""id""": example["""id"""]} snake_case_ : Tuple = example["""annotations"""] snake_case_ : Union[str, Any] = annotation["""yes_no_answer"""] if 0 in yes_no_answer or 1 in yes_no_answer: snake_case_ : str = ["""yes"""] if 1 in yes_no_answer else ["""no"""] snake_case_ : Dict = [] snake_case_ : List[str] = [] snake_case_ : Union[str, Any] = ["""<cls>"""] else: snake_case_ : List[Any] = ["""short"""] snake_case_ : Any = choose_first(annotation["""short_answers"""] ) if len(out["""start_token"""] ) == 0: # answer will be long if short is not available snake_case_ : str = ["""long"""] snake_case_ : str = choose_first(annotation["""long_answer"""] , is_long_answer=_snake_case ) snake_case_ : Tuple = [] answer.update(_snake_case ) # disregard some samples if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]: snake_case_ : Dict = True else: snake_case_ : Tuple = False snake_case_ : Tuple = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""] if not all(isinstance(answer[k] , _snake_case ) for k in cols ): raise ValueError("""Issue in ID""" , example["""id"""] ) return answer def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): """simple docstring""" snake_case_ : int = _get_single_answer(_snake_case ) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case_ : Optional[int] = example["""document"""]["""tokens"""] snake_case_ : Tuple = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) return { "context": " ".join(_snake_case ), "answer": { "start_token": -1_0_0, # ignore index in cross-entropy "end_token": -1_0_0, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples snake_case_ : Dict = ["""start_token""", """end_token"""] answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. 
[10] == 10 snake_case_ : List[str] = example["""document"""]["""tokens"""] snake_case_ : Dict = answer["""start_token"""] snake_case_ : int = answer["""end_token"""] snake_case_ : Any = [] for i in range(len(doc["""token"""] ) ): if not doc["is_html"][i]: context.append(doc["""token"""][i] ) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 snake_case_ : Any = """ """.join(context[start_token:end_token] ) # checking above code if assertion: snake_case_ : str = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]] snake_case_ : Dict = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]] snake_case_ : Tuple = """ """.join([old[i] for i in range(len(_snake_case ) ) if not is_html[i]] ) if new != old: print("""ID:""" , example["""id"""] ) print("""New:""" , _snake_case , end="""\n""" ) print("""Old:""" , _snake_case , end="""\n\n""" ) return { "context": " ".join(_snake_case ), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=2_0_4_8 , SCREAMING_SNAKE_CASE__ : Tuple=4_0_9_6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ): """simple docstring""" snake_case_ : Any = get_context_and_ans(_snake_case , assertion=_snake_case ) snake_case_ : Any = out["""answer"""] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } snake_case_ : Optional[Any] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids snake_case_ : List[str] = input_ids.index(tokenizer.sep_token_id ) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element snake_case_ : int = [] snake_case_ : Union[str, Any] = [] snake_case_ : Tuple = input_ids[:q_len] snake_case_ : Union[str, Any] = range(_snake_case , len(_snake_case ) , max_length - doc_stride ) for i in doc_start_indices: snake_case_ : str = i + max_length - q_len snake_case_ : Dict = input_ids[i:end_index] inputs.append(q_indices + slice ) category.append(answer["""category"""][0] ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-1_0_0] * len(_snake_case ), "end_token": [-1_0_0] * len(_snake_case ), "category": category, }, } snake_case_ : Dict = out["""context"""].split() snake_case_ : Union[str, Any] = splitted_context[answer["""end_token"""]] snake_case_ : int = len( tokenizer( """ """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=_snake_case , ).input_ids ) snake_case_ : List[Any] = len( tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=_snake_case ).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token snake_case_ : str = len(tokenizer(_snake_case , add_special_tokens=_snake_case ).input_ids ) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 snake_case_ : List[str] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive snake_case_ : str = answer["""start_token"""] snake_case_ : Any = answer["""end_token"""] if assertion: snake_case_ : Optional[Any] = tokenizer.decode(_snake_case ) if 
answer["span"] != new: print("""ISSUE IN TOKENIZATION""" ) print("""OLD:""" , answer["""span"""] ) print("""NEW:""" , _snake_case , end="""\n\n""" ) if len(_snake_case ) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } snake_case_ : Optional[Any] = input_ids[:q_len] snake_case_ : Union[str, Any] = range(_snake_case , len(_snake_case ) , max_length - doc_stride ) snake_case_ : str = [] snake_case_ : Union[str, Any] = [] snake_case_ : Tuple = [] snake_case_ : Dict = [] # null, yes, no, long, short for i in doc_start_indices: snake_case_ : List[Any] = i + max_length - q_len snake_case_ : str = input_ids[i:end_index] inputs.append(q_indices + slice ) assert len(inputs[-1] ) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: snake_case_ : List[str] = start_token - i + q_len snake_case_ : List[str] = end_token - i + q_len answers_category.append(answer["""category"""][0] ) # ["short"] -> "short" else: snake_case_ : str = -1_0_0 snake_case_ : List[Any] = -1_0_0 answers_category.append("""null""" ) snake_case_ : Union[str, Any] = inputs[-1][start_token : end_token + 1] answers_start_token.append(_snake_case ) answers_end_token.append(_snake_case ) if assertion: if new != old and new != [tokenizer.cls_token_id]: print("""ISSUE in strided for ID:""" , example["""id"""] ) print("""New:""" , tokenizer.decode(_snake_case ) ) print("""Old:""" , tokenizer.decode(_snake_case ) , end="""\n\n""" ) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=2_0_4_8 , SCREAMING_SNAKE_CASE__ : str=4_0_9_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False ): """simple docstring""" snake_case_ : Any = get_strided_contexts_and_ans( _snake_case , _snake_case , doc_stride=_snake_case , max_length=_snake_case , assertion=_snake_case , ) return example def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" with jsonlines.open(_snake_case , """a""" ) as writer: for example in tqdm(_snake_case , total=len(_snake_case ) , desc="""Saving samples ... 
""" ): snake_case_ : Tuple = example["""labels"""] for ids, start, end, cat in zip( example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { """input_ids""": ids, """start_token""": start, """end_token""": end, """category""": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": from datasets import load_dataset from transformers import BigBirdTokenizer a_ = load_dataset('''natural_questions''') a_ = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''') a_ = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation'''] a_ = { '''tokenizer''': tokenizer, '''doc_stride''': DOC_STRIDE, '''max_length''': MAX_LENGTH, '''assertion''': False, } a_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs) a_ = data.remove_columns(['''annotations''', '''document''', '''id''', '''question''']) print(data) np.random.seed(SEED) a_ = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl''' save_to_disk(data, file_name=cache_file_name)
711
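# The preprocessing above hinges on strided chunking: a question+context token
# sequence longer than the model window is cut into overlapping windows, each
# re-prepending the question tokens. A minimal, self-contained sketch of that
# step (`chunk_with_stride` is an illustrative name, not from the script):
def chunk_with_stride(input_ids, q_len, max_length=4096, doc_stride=2048):
    q_indices = input_ids[:q_len]
    windows = []
    # Mirrors the `doc_start_indices` loop above: step by max_length - doc_stride.
    for i in range(q_len, len(input_ids), max_length - doc_stride):
        windows.append(q_indices + input_ids[i : i + max_length - q_len])
    return windows


# Toy example: a 3-token "question" plus a longer "context".
chunks = chunk_with_stride(list(range(20)), q_len=3, max_length=10, doc_stride=5)
assert all(len(c) <= 10 for c in chunks) and all(c[:3] == [0, 1, 2] for c in chunks)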
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Union[str, Any] = num - 1 snake_case_ : List[str] = 0 while s % 2 == 0: snake_case_ : str = s // 2 t += 1 for _ in range(5 ): snake_case_ : List[Any] = random.randrange(2 , num - 1 ) snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if v != 1: snake_case_ : int = 0 while v != (num - 1): if i == t - 1: return False else: snake_case_ : str = i + 1 snake_case_ : int = (v**2) % num return True def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if num < 2: return False snake_case_ : Dict = [ 2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1, 7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1, 2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ): """simple docstring""" while True: snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(SCREAMING_SNAKE_CASE__ ): return num if __name__ == "__main__": a_ = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
48
0
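# Sanity check for the primality helpers above, assuming `is_prime_low_num`
# from that file is in scope. Below 1009**2 every composite is caught by the
# trial-division stage and a genuine prime always passes Rabin-Miller, so the
# comparison is exact on this range.
def is_prime_naive(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


for n in range(2, 2000):
    assert is_prime_low_num(n) == is_prime_naive(n), n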
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = 
["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Any = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" requires_backends(_lowercase , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : str = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase 
(cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Any = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[int] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Tuple = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Tuple = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] 
def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : str = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[int] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : str = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : str = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[int] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : int = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , 
["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Union[str, Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[int] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Any = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Dict = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): 
"""simple docstring""" _A : str = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Dict = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Dict = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : List[str] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Dict = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[int] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Tuple = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Any = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : int = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): 
requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Optional[Any] = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) class __lowercase ( metaclass=UpperCamelCase_): """simple docstring""" _A : Any = ["""torch"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""torch"""] )
712
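# The file above follows transformers' generated "dummy objects" pattern: each
# public torch-backed class gets a placeholder that raises a helpful error when
# torch is absent, instead of an opaque ImportError at import time. A minimal
# sketch (class name illustrative; DummyObject and requires_backends are the
# real transformers.utils helpers imported at the top of that file):
from transformers.utils import DummyObject, requires_backends


class MyPipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Raises with install instructions when torch is not available.
        requires_backends(self, ["torch"])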
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
0
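# For reference, the config class above behaves like any other PretrainedConfig
# (assumes a transformers install; field values are illustrative).
from transformers import DebertaV2Config

config = DebertaV2Config(hidden_size=128, num_hidden_layers=2, num_attention_heads=4)
# pos_att_type accepts a "|"-separated string, normalized in __init__ above:
print(DebertaV2Config(pos_att_type="p2c|c2p").pos_att_type)  # ['p2c', 'c2p']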
"""simple docstring""" import os def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] = "matrix.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(__UpperCamelCase ) , __UpperCamelCase ) ) as in_file: snake_case_ : str = in_file.read() snake_case_ : Optional[Any] = [[int(__UpperCamelCase ) for cell in row.split(""",""" )] for row in data.strip().splitlines()] snake_case_ : str = [[0 for cell in row] for row in grid] snake_case_ : Union[str, Any] = len(grid[0] ) snake_case_ : str = [[0 for i in range(__UpperCamelCase )] for j in range(__UpperCamelCase )] snake_case_ : Tuple = grid[0][0] for i in range(1 , __UpperCamelCase ): snake_case_ : int = grid[0][i] + dp[0][i - 1] for i in range(1 , __UpperCamelCase ): snake_case_ : List[str] = grid[i][0] + dp[i - 1][0] for i in range(1 , __UpperCamelCase ): for j in range(1 , __UpperCamelCase ): snake_case_ : Any = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F'''{solution() = }''')
713
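# The DP recurrence above on a tiny, hand-checkable grid: the cheapest
# right/down path through this 3x3 grid is 1 -> 3 -> 1 -> 1 -> 1 = 7.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
n = len(grid)
dp = [[0] * n for _ in range(n)]
dp[0][0] = grid[0][0]
for i in range(1, n):
    dp[0][i] = grid[0][i] + dp[0][i - 1]
    dp[i][0] = grid[i][0] + dp[i - 1][0]
for i in range(1, n):
    for j in range(1, n):
        dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
assert dp[-1][-1] == 7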
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
48
0
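# The sign-bit trick from the file above, spelled out:
assert (-5 ^ 3) < 0       # opposite signs -> negative XOR
assert not (4 ^ 9) < 0    # both positive  -> non-negative XOR
assert not (-4 ^ -9) < 0  # both negative  -> non-negative XOR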
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset a_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __lowercase ( nn.Module): """simple docstring""" def __init__(self , lowercase__ ): super().__init__() snake_case_ : List[Any] = torchvision.models.resnetaaa(pretrained=lowercase__ ) snake_case_ : str = list(model.children() )[:-2] snake_case_ : Optional[int] = nn.Sequential(*lowercase__ ) snake_case_ : Dict = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : List[Any] = self.pool(self.model(lowercase__ ) ) snake_case_ : Any = torch.flatten(lowercase__ , start_dim=2 ) snake_case_ : int = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __lowercase ( UpperCAmelCase__): """simple docstring""" def __init__(self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[Any] = [json.loads(lowercase__ ) for l in open(lowercase__ )] snake_case_ : Optional[int] = os.path.dirname(lowercase__ ) snake_case_ : List[Any] = tokenizer snake_case_ : Any = labels snake_case_ : Any = len(lowercase__ ) snake_case_ : str = max_seq_length snake_case_ : Union[str, Any] = transforms def __len__(self ): return len(self.data ) def __getitem__(self , lowercase__ ): snake_case_ : str = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=lowercase__ ) ) snake_case_ : Dict = sentence[0], sentence[1:-1], sentence[-1] snake_case_ : int = sentence[: self.max_seq_length] snake_case_ : Tuple = torch.zeros(self.n_classes ) snake_case_ : str = 1 snake_case_ : int = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) snake_case_ : int = self.transforms(lowercase__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = [len(row["""sentence"""] ) for row in batch] snake_case_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ), max(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long ) snake_case_ : Tuple = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): snake_case_ : Union[str, Any] = input_row["""sentence"""] snake_case_ : List[Any] = 1 snake_case_ : Any = torch.stack([row["""image"""] for row in batch] ) snake_case_ : Tuple = torch.stack([row["""label"""] for row in batch] ) snake_case_ : Dict = torch.stack([row["""image_start_token"""] for row in batch] ) snake_case_ : Optional[Any] = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", 
"Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ), ] )
714
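# A self-contained miniature of the padding collate above: pad variable-length
# token rows into one LongTensor plus a 0/1 attention mask (names illustrative).
import torch


def pad_collate(batch):
    lengths = [len(row["sentence"]) for row in batch]
    bsz, max_len = len(batch), max(lengths)
    text = torch.zeros(bsz, max_len, dtype=torch.long)
    mask = torch.zeros(bsz, max_len, dtype=torch.long)
    for i, (row, length) in enumerate(zip(batch, lengths)):
        text[i, :length] = row["sentence"]
        mask[i, :length] = 1
    return text, mask


batch = [{"sentence": torch.tensor([1, 2, 3])}, {"sentence": torch.tensor([4, 5])}]
text, mask = pad_collate(batch)
assert mask.tolist() == [[1, 1, 1], [1, 1, 0]]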
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('''fixtures/test_sentencepiece.model''') a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} a_ = '''>>zh<<''' a_ = '''Helsinki-NLP/''' if is_torch_available(): a_ = '''pt''' elif is_tf_available(): a_ = '''tf''' else: a_ = '''jax''' @require_sentencepiece class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = MarianTokenizer _A : List[str] = False _A : List[str] = True def __UpperCamelCase (self ): super().setUp() snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) snake_case_ : Any = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase (self , **lowercase__ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return ( "This is a test", "This is a test", ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """</s>""" snake_case_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def __UpperCamelCase (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) snake_case_ : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : List[str] = tok( ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.get_tokenizer() snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __UpperCamelCase (self ): # fmt: off snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) snake_case_ : Dict = """Tämä on testi""" snake_case_ : List[Any] = """This is a test""" snake_case_ : Optional[int] = [76, 7, 20_47, 2] snake_case_ : List[str] = [69, 12, 11, 9_40, 2] snake_case_ : Any = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
48
0
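# Outside the test harness, the tokenizer under test is used like this
# (downloads the Helsinki-NLP/opus-mt-en-de files referenced above; network
# access assumed):
from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
ids = tok("I am a small frog").input_ids
print(tok.decode(ids, skip_special_tokens=True))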
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" return x if y == 0 else greatest_common_divisor(SCREAMING_SNAKE_CASE_ , x % y ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" return (x * y) // greatest_common_divisor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 2_0 ): """simple docstring""" snake_case_ : List[Any] = 1 for i in range(1 , n + 1 ): snake_case_ : Tuple = lcm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return g if __name__ == "__main__": print(F'''{solution() = }''')
715
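# Stdlib cross-check for the smallest-multiple solution above (Python >= 3.9
# for math.lcm): 232792560 is the smallest number divisible by all of 1..20.
from math import lcm as stdlib_lcm

assert stdlib_lcm(*range(1, 21)) == 232792560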
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _A : ClassVar[Features] = Features({"""audio""": Audio()}) _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")}) _A : str = "audio" _A : str = "transcription" def __UpperCamelCase (self , lowercase__ ): if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase__ ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) snake_case_ : Optional[int] = copy.deepcopy(self ) snake_case_ : Tuple = self.input_schema.copy() snake_case_ : List[str] = features[self.audio_column] snake_case_ : Any = input_schema return task_template @property def __UpperCamelCase (self ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
48
0
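# Hypothetical usage of the task template above (import path as in the older
# `datasets` releases that still shipped task templates; column name illustrative):
from datasets import Audio, Features, Value
from datasets.tasks import AutomaticSpeechRecognition

features = Features({"file_audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition(audio_column="file_audio")
print(template.align_with_features(features).input_schema)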
"""simple docstring""" from torch import nn def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f'Unsupported activation function: {act_fn}' )
716
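# Quick check of the activation lookup above (assumes torch is installed and
# the helper is in scope under the restored name `get_activation`):
import torch
from torch import nn

act = get_activation("swish")
assert isinstance(act, nn.SiLU)
print(act(torch.tensor([-1.0, 0.0, 1.0])))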
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : int = ["""pixel_values"""] def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24} snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[str] = size snake_case_ : str = crop_pct snake_case_ : str = resample snake_case_ : Optional[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : int = do_rescale snake_case_ : Optional[int] = rescale_factor snake_case_ : str = do_normalize snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ): snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: snake_case_ : Dict = int(size["""height"""] / crop_pct ) else: snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) else: if "shortest_edge" in size: snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ ) elif "height" in size and "width" in size: snake_case_ : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): snake_case_ : int = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ): snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct snake_case_ : List[Any] = resample if resample is not None else self.resample snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : int = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : List[str] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] snake_case_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
48
0
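# The crop_pct logic above in numbers: with shortest_edge=224 and crop_pct=0.9
# the image is first resized so its short side is int(224 / 0.9) = 248, then
# center-cropped back to 224, so the crop keeps ~90% of the resized edge.
size, crop_pct = 224, 0.9
resize_edge = int(size / crop_pct)
assert resize_edge == 248
assert round(size / resize_edge, 2) == 0.9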
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a_ = logging.get_logger(__name__) a_ = {'''tokenizer_file''': '''tokenizer.json'''} a_ = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class __lowercase ( UpperCamelCase__): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _A : List[Any] = ["""input_ids""", """attention_mask"""] _A : Tuple = None def __init__(self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<unk>" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<pad>" , lowercase__=False , lowercase__=False , **lowercase__ , ): super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , pad_token=lowercase__ , add_prefix_space=lowercase__ , clean_up_tokenization_spaces=lowercase__ , **lowercase__ , ) snake_case_ : Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , lowercase__ ) != add_prefix_space: snake_case_ : int = getattr(lowercase__ , pre_tok_state.pop("""type""" ) ) snake_case_ : List[Any] = add_prefix_space snake_case_ : Any = pre_tok_class(**lowercase__ ) snake_case_ : int = add_prefix_space def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): snake_case_ : List[str] = kwargs.get("""is_split_into_words""" , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' """ pretokenized inputs.""" ) return super()._batch_encode_plus(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): snake_case_ : Any = kwargs.get("""is_split_into_words""" , lowercase__ ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with' """ pretokenized inputs.""" ) return super()._encode_plus(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : Optional[Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: snake_case_ : Dict = 
input_ids[-self.model_max_length :] return input_ids
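The record above implements a fast Bloom tokenizer. A minimal usage sketch, assuming the transformers package is installed and the bigscience/bloom-560m checkpoint from the map above is reachable; the sample sentence is illustrative:

# Minimal sketch: load the fast Bloom tokenizer and round-trip a string.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
ids = tok("Hello world", add_special_tokens=False)["input_ids"]
print(ids)
print(tok.decode(ids))  # -> "Hello world"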
717
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } a_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off a_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = VOCAB_FILES_NAMES _A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _A : str = ["""input_ids""", """attention_mask"""] _A : Tuple = MBartTokenizer _A : List[int] = [] _A : List[int] = [] def __init__(self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : int = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , **lowercase__ , ) snake_case_ : Dict = vocab_file snake_case_ : Optional[int] = False if not self.vocab_file else True snake_case_ : Optional[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) snake_case_ : Any = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else """en_XX""" snake_case_ : Tuple = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : Tuple = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase (self ): return self._src_lang @src_lang.setter def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): snake_case_ : List[Any] = [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ): if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) snake_case_ : int = src_lang snake_case_ : List[str] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) snake_case_ : List[str] = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase (self , lowercase__ , lowercase__ = "en_XX" , lowercase__ = None , lowercase__ = "ro_RO" , **lowercase__ , ): snake_case_ : List[str] = src_lang snake_case_ : int = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def __UpperCamelCase (self ): return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase (self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Tuple = [] snake_case_ : List[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Tuple = self.convert_tokens_to_ids(lowercase__ ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : int = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : List[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + 
suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowercase__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory.' ) return snake_case_ : List[str] = os.path.join( lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
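The record above is the fast MBart tokenizer with its language-code handling. A minimal seq2seq preprocessing sketch, assuming transformers is installed and the facebook/mbart-large-en-ro checkpoint is reachable; the sentence is illustrative:

# Sketch: source text is tokenized under src_lang; MBart appends the
# language code after </s> as the suffix special tokens shown above.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Military Solution in Syria")
print(tok.convert_ids_to_tokens(batch["input_ids"])[-2:])  # ['</s>', 'en_XX']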
48
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a_ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
718
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class __lowercase : """simple docstring""" def __init__(self , lowercase__ ): snake_case_ : Union[str, Any] = data snake_case_ : List[str] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0] @staticmethod def __UpperCamelCase (lowercase__ , lowercase__ ): return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f def __UpperCamelCase (self ): snake_case_ : Any = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) snake_case_ : Tuple = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def __UpperCamelCase (self ): return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = list(struct.unpack(""">16L""" , lowercase__ ) ) + [0] * 64 for i in range(16 , 80 ): snake_case_ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def __UpperCamelCase (self ): snake_case_ : List[Any] = self.padding() snake_case_ : Any = self.split_blocks() for block in self.blocks: snake_case_ : Any = self.expand_block(lowercase__ ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[Any] = self.h for i in range(0 , 80 ): if 0 <= i < 20: snake_case_ : Optional[Any] = (b & c) | ((~b) & d) snake_case_ : List[str] = 0X5_a_8_2_7_9_9_9 elif 20 <= i < 40: snake_case_ : Union[str, Any] = b ^ c ^ d snake_case_ : Tuple = 0X6_e_d_9_e_b_a_1 elif 40 <= i < 60: snake_case_ : str = (b & c) | (b & d) | (c & d) snake_case_ : List[str] = 0X8_f_1_b_b_c_d_c elif 60 <= i < 80: snake_case_ : Tuple = b ^ c ^ d snake_case_ : str = 0Xc_a_6_2_c_1_d_6 snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[Any] = ( self.rotate(lowercase__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f, a, self.rotate(lowercase__ , 30 ), c, d, ) snake_case_ : Any = ( self.h[0] + a & 0Xf_f_f_f_f_f_f_f, self.h[1] + b & 0Xf_f_f_f_f_f_f_f, self.h[2] + c & 0Xf_f_f_f_f_f_f_f, self.h[3] + d & 0Xf_f_f_f_f_f_f_f, self.h[4] + e & 0Xf_f_f_f_f_f_f_f, ) return ("{:08x}" * 5).format(*self.h ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = b"""Test String""" assert SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # noqa: S324 def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) snake_case_ : Optional[int] = parser.parse_args() snake_case_ : Optional[int] = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: snake_case_ : List[str] = f.read() else: snake_case_ : Dict = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" ) print(SHAaHash(SCREAMING_SNAKE_CASE__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
48
0
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule a_ = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys a_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
719
"""simple docstring""" from manim import * class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) snake_case_ : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : str = [mem.copy() for i in range(6 )] snake_case_ : str = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Any = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = VGroup(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[Any] = Text("""CPU""" , font_size=24 ) snake_case_ : Tuple = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase__ ) snake_case_ : List[Any] = [mem.copy() for i in range(4 )] snake_case_ : Tuple = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : List[str] = Text("""GPU""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase__ ) snake_case_ : Optional[Any] = [mem.copy() for i in range(6 )] snake_case_ : List[Any] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : Dict = Text("""Model""" , font_size=24 ) snake_case_ : int = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__ ) model.move_to([3, -1.0, 0] ) self.add(lowercase__ ) snake_case_ : Dict = [] for i, rect in enumerate(lowercase__ ): rect.set_stroke(lowercase__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) snake_case_ : List[str] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0 ) self.add(lowercase__ ) cpu_targs.append(lowercase__ ) snake_case_ : List[str] = [mem.copy() for i in range(6 )] snake_case_ : List[str] = VGroup(*lowercase__ ).arrange(lowercase__ , buff=0 ) snake_case_ : str = Text("""Loaded Checkpoint""" , font_size=24 ) snake_case_ : Any = Group(lowercase__ , lowercase__ ).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) snake_case_ : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) snake_case_ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase__ , lowercase__ ) snake_case_ : List[Any] = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) snake_case_ : List[Any] = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase__ ) , Write(lowercase__ ) ) self.play(Write(lowercase__ , run_time=1 ) , Create(lowercase__ , run_time=1 ) ) snake_case_ : Optional[int] = [] snake_case_ : List[str] = [] for i, rect in enumerate(lowercase__ ): snake_case_ : Optional[Any] = fill.copy().set_fill(lowercase__ , opacity=0.7 ) target.move_to(lowercase__ ) first_animations.append(GrowFromCenter(lowercase__ , run_time=1 ) ) snake_case_ : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(lowercase__ , run_time=1.5 ) ) self.play(*lowercase__ ) self.play(*lowercase__ ) self.wait()
48
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : Optional[Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] snake_case_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) snake_case_ : str = { """do_resize""": True, """size""": {"""height""": 2_24, """width""": 2_24}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], """do_convert_rgb""": True, } snake_case_ : Tuple = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__a , __a ) def __UpperCamelCase (self , **lowercase__ ): return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def __UpperCamelCase (self , **lowercase__ ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a ) def __UpperCamelCase (self , **lowercase__ ): return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a ) def __UpperCamelCase (self ): shutil.rmtree(self.tmpdirname ) def __UpperCamelCase (self ): snake_case_ : Any = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] snake_case_ : List[Any] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase (self ): snake_case_ : List[Any] = self.get_tokenizer() snake_case_ : Optional[Any] = self.get_rust_tokenizer() snake_case_ : Optional[Any] = self.get_image_processor() snake_case_ : Any = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) processor_slow.save_pretrained(self.tmpdirname ) snake_case_ : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a ) snake_case_ : Tuple = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) processor_fast.save_pretrained(self.tmpdirname ) snake_case_ : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __a ) self.assertIsInstance(processor_fast.tokenizer , __a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __a ) 
self.assertIsInstance(processor_fast.image_processor , __a ) def __UpperCamelCase (self ): snake_case_ : List[Any] = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) snake_case_ : Union[str, Any] = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" ) snake_case_ : int = self.get_image_processor(do_normalize=__a ) snake_case_ : Tuple = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=__a ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.get_image_processor() snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : Any = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) snake_case_ : Union[str, Any] = self.prepare_image_inputs() snake_case_ : Optional[int] = image_processor(__a , return_tensors="""np""" ) snake_case_ : List[Any] = processor(images=__a , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCamelCase (self ): snake_case_ : Dict = self.get_image_processor() snake_case_ : Any = self.get_tokenizer() snake_case_ : Dict = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) snake_case_ : Tuple = """Alexandra,T-shirt的价格是15便士。""" snake_case_ : Tuple = processor(text=__a ) snake_case_ : str = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase (self ): snake_case_ : List[Any] = self.get_image_processor() snake_case_ : List[Any] = self.get_tokenizer() snake_case_ : Optional[int] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) snake_case_ : Tuple = """Alexandra,T-shirt的价格是15便士。""" snake_case_ : List[Any] = self.prepare_image_inputs() snake_case_ : int = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(__a ): processor() def __UpperCamelCase (self ): snake_case_ : Dict = self.get_image_processor() snake_case_ : int = self.get_tokenizer() snake_case_ : str = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) snake_case_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] snake_case_ : List[Any] = processor.batch_decode(__a ) snake_case_ : Union[str, Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def __UpperCamelCase (self ): snake_case_ : int = self.get_image_processor() snake_case_ : List[Any] = self.get_tokenizer() snake_case_ : List[str] = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a ) snake_case_ : List[str] = """Alexandra,T-shirt的价格是15便士。""" snake_case_ : Union[str, Any] = self.prepare_image_inputs() snake_case_ : Dict = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
720
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = 0 if start < end: snake_case_ : Union[str, Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = a[end] snake_case_ : Dict = a[pivot] snake_case_ : Any = temp snake_case_ , snake_case_ : Dict = _in_place_partition(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , p - 1 ) count += _in_place_quick_sort(SCREAMING_SNAKE_CASE__ , p + 1 , SCREAMING_SNAKE_CASE__ ) return count def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Tuple = 0 snake_case_ : List[Any] = randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : Dict = a[end] snake_case_ : List[Any] = a[pivot] snake_case_ : Optional[Any] = temp snake_case_ : List[str] = start - 1 for index in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value snake_case_ : Any = new_pivot_index + 1 snake_case_ : Tuple = a[new_pivot_index] snake_case_ : Optional[int] = a[index] snake_case_ : Tuple = temp snake_case_ : Union[str, Any] = a[new_pivot_index + 1] snake_case_ : Union[str, Any] = a[end] snake_case_ : Union[str, Any] = temp return new_pivot_index + 1, count a_ = TemporaryFile() a_ = 100 # 1000 elements are to be sorted a_ , a_ = 0, 1 # mean and standard deviation a_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array a_ = np.load(outfile) a_ = len(M) - 1 a_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
48
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowercase ( unittest.TestCase): """simple docstring""" @property def __UpperCamelCase (self ): torch.manual_seed(0 ) snake_case_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __UpperCamelCase (self ): snake_case_ : Dict = self.dummy_uncond_unet snake_case_ : Dict = ScoreSdeVeScheduler() snake_case_ : Any = ScoreSdeVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) sde_ve.to(UpperCAmelCase__ ) sde_ve.set_progress_bar_config(disable=UpperCAmelCase__ ) snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=UpperCAmelCase__ ).images snake_case_ : Any = torch.manual_seed(0 ) snake_case_ : Tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=UpperCAmelCase__ , return_dict=UpperCAmelCase__ )[ 0 ] snake_case_ : List[Any] = image[0, -3:, -3:, -1] snake_case_ : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : Optional[int] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : Any = """google/ncsnpp-church-256""" snake_case_ : List[Any] = UNetaDModel.from_pretrained(UpperCAmelCase__ ) snake_case_ : List[str] = ScoreSdeVeScheduler.from_pretrained(UpperCAmelCase__ ) snake_case_ : Dict = ScoreSdeVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) sde_ve.to(UpperCAmelCase__ ) sde_ve.set_progress_bar_config(disable=UpperCAmelCase__ ) snake_case_ : str = torch.manual_seed(0 ) snake_case_ : Dict = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=UpperCAmelCase__ ).images snake_case_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) snake_case_ : Dict = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
721
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : bool = False ): """simple docstring""" snake_case_ : dict = {i: [] for i in range(SCREAMING_SNAKE_CASE__ )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(SCREAMING_SNAKE_CASE__ ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(SCREAMING_SNAKE_CASE__ ): for j in range(i + 1 , SCREAMING_SNAKE_CASE__ ): if random.random() < probability: graph[i].append(SCREAMING_SNAKE_CASE__ ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(SCREAMING_SNAKE_CASE__ ) return graph def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return { i: [j for j in range(SCREAMING_SNAKE_CASE__ ) if i != j] for i in range(SCREAMING_SNAKE_CASE__ ) } if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging a_ = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ ) @dataclass class __lowercase : """simple docstring""" _A : Union[str, Any] = list_field( default=[] , metadata={ """help""": ( """Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version""" """ of all available models""" ) } , ) _A : Optional[Any] = list_field( default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""}) _A : Optional[int] = list_field( default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , ) _A : Tuple = field( default=__a , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , ) _A : str = field( default=__a , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , ) _A : List[Any] = field( default=__a , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""}) _A : str = field(default=__a , metadata={"""help""": """Use FP16 to accelerate inference."""}) _A : Optional[int] = field(default=__a , metadata={"""help""": """Benchmark training of model"""}) _A : List[Any] = field(default=__a , metadata={"""help""": """Verbose memory tracing"""}) _A : Any = field( default=__a , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , ) _A : List[str] = field( default=__a , metadata={ """help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory""" } , ) _A : List[Any] = field(default=__a , metadata={"""help""": """Trace memory line by line"""}) _A : int = field(default=__a , metadata={"""help""": """Save result to a CSV file"""}) _A : int = field(default=__a , metadata={"""help""": """Save all print statements in a log file"""}) _A : str = field(default=__a , metadata={"""help""": """Whether to print environment information"""}) _A : int = field( default=__a , metadata={ """help""": ( """Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use""" """ multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled""" """ for debugging / testing and on TPU.""" ) } , ) _A : Tuple = field( default=f'inference_time_{round(time())}.csv' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , ) _A : List[str] = field( default=f'inference_memory_{round(time())}.csv' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , ) _A : Tuple = field( default=f'train_time_{round(time())}.csv' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , ) _A : int = field( default=f'train_memory_{round(time())}.csv' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , ) _A : List[Any] = field( default=f'env_info_{round(time())}.csv' , metadata={"""help""": """CSV filename used if saving environment information."""} , ) _A : List[str] = field( default=f'log_{round(time())}.csv' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , ) _A : int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""}) _A : Optional[Any] = field( default=__a , metadata={ """help""": ( """Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain""" """ model weights.""" ) } , ) def __UpperCamelCase (self ): warnings.warn( f'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' """ are deprecated in general and it is advised to use external Benchmarking libraries """ """ to benchmark Transformer models.""" , A__ , ) def __UpperCamelCase (self ): return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __UpperCamelCase (self ): if len(self.models ) <= 0: raise ValueError( """Please make sure you provide at least one model name / model identifier, *e.g.* `--models""" """ bert-base-cased` or `args.models = [\'bert-base-cased\'].""" ) return self.models @property def __UpperCamelCase (self ): if not self.multi_process: return False elif self.is_tpu: logger.info("""Multiprocessing is currently not possible on TPU.""" ) return False else: return True
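The list_field helper above is a thin wrapper around dataclasses.field(default_factory=...), which is how mutable defaults must be declared on dataclasses. A minimal sketch; the field names are illustrative:

from dataclasses import dataclass, field

@dataclass
class BenchmarkArgs:
    batch_sizes: list = field(default_factory=lambda: [8])
    sequence_lengths: list = field(default_factory=lambda: [8, 32, 128, 512])

args = BenchmarkArgs()
print(args.batch_sizes, args.sequence_lengths)  # each instance gets fresh lists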
700
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """dpr""" def __init__(self , lowercase__=3_05_22 , lowercase__=7_68 , lowercase__=12 , lowercase__=12 , lowercase__=30_72 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=2 , lowercase__=0.02 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__ = 0 , **lowercase__ , ): super().__init__(pad_token_id=lowercase__ , **lowercase__ ) snake_case_ : List[Any] = vocab_size snake_case_ : List[str] = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : int = hidden_act snake_case_ : Dict = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : Union[str, Any] = projection_dim snake_case_ : str = position_embedding_type
48
0
"""simple docstring""" import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class __lowercase ( unittest.TestCase): """simple docstring""" def __init__(self , lowercase__ ): snake_case_ : Union[str, Any] = parent def __UpperCamelCase (self ): return {} def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Any = """<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>""" snake_case_ : Union[str, Any] = """\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n """ return [html_string_a, html_string_a] @require_bsa class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : Optional[Any] = MarkupLMFeatureExtractor if is_bsa_available() else None def __UpperCamelCase (self ): snake_case_ : int = MarkupLMFeatureExtractionTester(self ) @property def __UpperCamelCase (self ): return self.feature_extract_tester.prepare_feat_extract_dict() def __UpperCamelCase (self ): snake_case_ : Any = self.feature_extraction_class() # Test not batched input snake_case_ : List[Any] = get_html_strings()[0] snake_case_ : Union[str, Any] = feature_extractor(lowercase__ ) # fmt: off snake_case_ : Optional[int] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]] snake_case_ : Optional[int] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]] # fmt: on self.assertEqual(encoding.nodes , lowercase__ ) self.assertEqual(encoding.xpaths , lowercase__ ) # Test batched snake_case_ : Optional[Any] = get_html_strings() snake_case_ : Tuple = feature_extractor(lowercase__ ) # fmt: off snake_case_ : Union[str, Any] = expected_nodes + [["""My First Heading""", """My first paragraph."""]] snake_case_ : Tuple = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , lowercase__ ) self.assertEqual(encoding.xpaths , lowercase__ )
701
"""simple docstring""" import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm a_ = re.compile('''[^A-Za-z_0-9]''') # parameters used in DuplicationIndex a_ = 10 a_ = 256 def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS: return None snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ ) for token in set(SCREAMING_SNAKE_CASE__ ): min_hash.update(token.encode() ) return min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0} class __lowercase : """simple docstring""" def __init__(self , *, lowercase__ = 0.85 , ): snake_case_ : Tuple = duplication_jaccard_threshold snake_case_ : Optional[Any] = NUM_PERM snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm ) snake_case_ : List[Any] = defaultdict(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : int = self._index.query(lowercase__ ) if code_key in self._index.keys: print(f'Duplicate key {code_key}' ) return self._index.insert(lowercase__ , lowercase__ ) if len(lowercase__ ) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(lowercase__ ) break else: self._duplicate_clusters[close_duplicates[0]].add(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : str = [] for base, duplicates in self._duplicate_clusters.items(): snake_case_ : Optional[Any] = [base] + list(lowercase__ ) # reformat the cluster to be a list of dict snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster] duplicate_clusters.append(lowercase__ ) return duplicate_clusters def __UpperCamelCase (self , lowercase__ ): snake_case_ : int = self.get_duplicate_clusters() with open(lowercase__ , """w""" ) as f: json.dump(lowercase__ , lowercase__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ , snake_case_ : str = element snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] ) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ): """simple docstring""" with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ): if data is not None: yield data def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ): """simple docstring""" snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ ) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ): di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Returns a List[Cluster] where Cluster is List[str] with the filenames. 
return di.get_duplicate_clusters() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ ) return len(tokensa & tokensa ) / len(tokensa | tokensa ) a_ = None def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Optional[Any] = [] for elementa in cluster: snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""] for elementa in extremes: snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""] if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold: elementa["copies"] += 1 break else: snake_case_ : Union[str, Any] = 1 extremes.append(SCREAMING_SNAKE_CASE__ ) return extremes def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" global _shared_dataset snake_case_ : str = dataset snake_case_ : int = [] snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ ) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ): extremes_list.append(SCREAMING_SNAKE_CASE__ ) return extremes_list def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ): """simple docstring""" snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster} snake_case_ : str = {} snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for extremes in extremes_clusters: for element in extremes: snake_case_ : int = element snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() ) snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ ) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: snake_case_ : List[Any] = element["""base_index"""] in extreme_dict if element["is_extreme"]: snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""] print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' ) print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' ) return ds_filter, duplicate_clusters
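A minimal sketch of the MinHash/LSH near-duplicate lookup the pipeline above builds on, assuming the datasketch package; the threshold and permutation count mirror the constants in the record, and the documents are toy data:

from datasketch import MinHash, MinHashLSH

def make_min_hash(tokens, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in set(tokens):
        m.update(token.encode())
    return m

lsh = MinHashLSH(threshold=0.85, num_perm=256)
lsh.insert("doc_a", make_min_hash("def add ( a , b ) : return a + b".split()))
query = make_min_hash("def add ( a , b ) : return b + a".split())
print(lsh.query(query))  # ['doc_a'] -- identical token sets hash identically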
48
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=__snake_case): """simple docstring""" _A : Dict = ["""transformers""", """torch""", """note_seq"""] def __init__(self , *lowercase__ , **lowercase__ ): requires_backends(self , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] ) @classmethod def __UpperCamelCase (cls , *lowercase__ , **lowercase__ ): requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
702
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) a_ = logging.getLogger(__name__) if __name__ == "__main__": a_ = argparse.ArgumentParser( description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)''' ) parser.add_argument( '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.''' ) parser.add_argument( '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.''' ) parser.add_argument('''--vocab_size''', default=30522, type=int) a_ = parser.parse_args() logger.info(F'''Loading data from {args.data_file}''') with open(args.data_file, '''rb''') as fp: a_ = pickle.load(fp) logger.info('''Counting occurrences for MLM.''') a_ = Counter() for tk_ids in data: counter.update(tk_ids) a_ = [0] * args.vocab_size for k, v in counter.items(): a_ = v logger.info(F'''Dump to {args.token_counts_dump}''') with open(args.token_counts_dump, '''wb''') as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
48
0
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0 ): """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE__ , int ) or SCREAMING_SNAKE_CASE__ < 0: raise ValueError("""Invalid input""" ) modulus : int = 1_0**SCREAMING_SNAKE_CASE__ number : int = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F'''{SCREAMING_SNAKE_CASE__(10) = }''')
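The heavy lifting in the record above is Python's three-argument pow, which performs modular exponentiation without ever materializing 2**7830457 (Project Euler 97: the last ten digits of 28433 * 2**7830457 + 1):

modulus = 10 ** 10
number = 28433 * pow(2, 7830457, modulus) + 1
print(str(number % modulus))  # last ten digits, zero large-integer blowup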
703
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : Optional[Any] = tmp_path / """cache""" snake_case_ : Optional[int] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Tuple = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): """simple docstring""" snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : int = {"""text""": """string"""} snake_case_ : Any = features.copy() if features else default_expected_features snake_case_ : List[Any] = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" snake_case_ : Union[str, Any] = tmp_path / """cache""" snake_case_ : Optional[Any] = {"""text""": """string"""} snake_case_ : Optional[int] = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = text_path elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ : str = [text_path] snake_case_ : List[str] = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : Dict = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , 
SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=("train",) ): """simple docstring""" assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for split in splits: snake_case_ : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : int = tmp_path / """cache""" snake_case_ : List[str] = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): snake_case_ : Optional[Any] = TextDatasetReader({"""train""": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" snake_case_ : List[str] = {"""text""": """string"""} snake_case_ : int = features.copy() if features else default_expected_features snake_case_ : Tuple = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) snake_case_ : str = TextDatasetReader({"""train""": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if split: snake_case_ : Union[str, Any] = {split: text_path} else: snake_case_ : Union[str, Any] = """train""" snake_case_ : int = {"""train""": text_path, """test""": text_path} snake_case_ : List[Any] = tmp_path / """cache""" snake_case_ : Tuple = {"""text""": """string"""} snake_case_ : int = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
48
0
"""simple docstring""" import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class __lowercase : """simple docstring""" def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=64 , lowercase__=32 , lowercase__=5 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ): snake_case_ : Optional[Any] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : str = seq_length snake_case_ : Union[str, Any] = is_training snake_case_ : Tuple = use_input_mask snake_case_ : int = use_token_type_ids snake_case_ : List[Any] = use_labels snake_case_ : List[Any] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Tuple = embedding_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : List[str] = intermediate_size snake_case_ : List[str] = hidden_act snake_case_ : Dict = hidden_dropout_prob snake_case_ : List[Any] = attention_probs_dropout_prob snake_case_ : Tuple = max_position_embeddings snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = type_sequence_label_size snake_case_ : Optional[int] = initializer_range snake_case_ : Any = num_labels snake_case_ : Any = num_choices snake_case_ : Optional[int] = scope def __UpperCamelCase (self ): snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Dict = None if self.use_token_type_ids: snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : Dict = None snake_case_ : Tuple = None snake_case_ : Optional[Any] = None if self.use_labels: snake_case_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : int = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase (self ): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = MegatronBertModel(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case ) snake_case_ : str = model(__snake_case , token_type_ids=__snake_case ) snake_case_ : Optional[int] = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = MegatronBertForMaskedLM(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : List[Any] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Any = MegatronBertForCausalLM(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = MegatronBertForNextSentencePrediction(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : int = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = MegatronBertForPreTraining(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = MegatronBertForQuestionAnswering(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : Tuple = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): 
snake_case_ : int = self.num_labels snake_case_ : List[str] = MegatronBertForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : List[str] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[Any] = self.num_labels snake_case_ : int = MegatronBertForTokenClassification(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : Any = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Optional[Any] = self.num_choices snake_case_ : Tuple = MegatronBertForMultipleChoice(config=__snake_case ) model.to(__snake_case ) model.eval() snake_case_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Dict = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase (self ): snake_case_ : List[str] = self.prepare_config_and_inputs() ( snake_case_ ) : Optional[Any] = config_and_inputs snake_case_ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase): """simple docstring""" _A : Tuple = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) _A : Tuple = ( { """feature-extraction""": MegatronBertModel, """fill-mask""": MegatronBertForMaskedLM, """question-answering""": MegatronBertForQuestionAnswering, """text-classification""": MegatronBertForSequenceClassification, """text-generation""": MegatronBertForCausalLM, """token-classification""": MegatronBertForTokenClassification, """zero-shot""": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) _A : List[str] = True # test_resize_embeddings = False _A : Union[str, Any] = False def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__=False ): snake_case_ : Union[str, Any] = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case ) if return_labels: if model_class in get_values(__snake_case ): snake_case_ : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case ) snake_case_ : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case ) return inputs_dict def __UpperCamelCase (self ): snake_case_ : List[Any] = MegatronBertModelTester(self ) 
snake_case_ : Union[str, Any] = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def __UpperCamelCase (self ): self.config_tester.run_common_tests() def __UpperCamelCase (self ): snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case ) def __UpperCamelCase (self ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ): return torch.tensor( a_ , dtype=torch.long , device=a_ , ) a_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers class __lowercase ( unittest.TestCase): """simple docstring""" @slow @unittest.skip("""Model is not available.""" ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: snake_case_ : Any = os.path.join(os.environ["""MYDIR"""] , __snake_case ) snake_case_ : Union[str, Any] = MegatronBertModel.from_pretrained(__snake_case ) model.to(__snake_case ) model.half() snake_case_ : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): snake_case_ : Any = model(__snake_case )[0] snake_case_ : Tuple = torch.Size((1, 9, 10_24) ) self.assertEqual(output.shape , __snake_case ) snake_case_ : str = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): snake_case_ : int = output[0, ii, jj] snake_case_ : str = expected[3 * ii + jj] snake_case_ : Union[str, Any] = '''ii={} jj={} a={} b={}'''.format(__snake_case , __snake_case , __snake_case , __snake_case ) self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case ) , msg=__snake_case )
704
"""simple docstring""" from copy import deepcopy class __lowercase : """simple docstring""" def __init__(self , lowercase__ = None , lowercase__ = None ): if arr is None and size is not None: snake_case_ : str = size snake_case_ : Optional[Any] = [0] * size elif arr is not None: self.init(lowercase__ ) else: raise ValueError("""Either arr or size must be specified""" ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[Any] = len(lowercase__ ) snake_case_ : int = deepcopy(lowercase__ ) for i in range(1 , self.size ): snake_case_ : Optional[Any] = self.next_(lowercase__ ) if j < self.size: self.tree[j] += self.tree[i] def __UpperCamelCase (self ): snake_case_ : Dict = self.tree[:] for i in range(self.size - 1 , 0 , -1 ): snake_case_ : Optional[int] = self.next_(lowercase__ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def __UpperCamelCase (lowercase__ ): return index + (index & (-index)) @staticmethod def __UpperCamelCase (lowercase__ ): return index - (index & (-index)) def __UpperCamelCase (self , lowercase__ , lowercase__ ): if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value snake_case_ : Tuple = self.next_(lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): self.add(lowercase__ , value - self.get(lowercase__ ) ) def __UpperCamelCase (self , lowercase__ ): if right == 0: return 0 snake_case_ : List[str] = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] snake_case_ : Optional[int] = self.prev(lowercase__ ) return result def __UpperCamelCase (self , lowercase__ , lowercase__ ): return self.prefix(lowercase__ ) - self.prefix(lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return self.query(lowercase__ , index + 1 ) def __UpperCamelCase (self , lowercase__ ): value -= self.tree[0] if value < 0: return -1 snake_case_ : Tuple = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 snake_case_ : Tuple = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" from PIL import Image def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Image , SCREAMING_SNAKE_CASE__ : float ): """simple docstring""" def brightness(SCREAMING_SNAKE_CASE__ : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change brightness to 100 a_ = change_brightness(img, 100) brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
705
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : list ): """simple docstring""" snake_case_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) for i in range(1 , SCREAMING_SNAKE_CASE__ ): snake_case_ : Tuple = collection[i] snake_case_ : Tuple = 0 snake_case_ : str = i - 1 while low <= high: snake_case_ : Optional[int] = (low + high) // 2 if val < collection[mid]: snake_case_ : List[str] = mid - 1 else: snake_case_ : str = mid + 1 for j in range(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , -1 ): snake_case_ : List[str] = collection[j - 1] snake_case_ : Any = val return collection if __name__ == "__main__": a_ = input('''Enter numbers separated by a comma:\n''').strip() a_ = [int(item) for item in user_input.split(''',''')] print(binary_insertion_sort(unsorted))
48
0
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList a_ = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class __lowercase ( _UpperCamelCase): """simple docstring""" def __init__(self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=1 ): snake_case_ : Union[str, Any] = tokenizer snake_case_ : Tuple = dataset snake_case_ : str = len(__a ) if n_tasks is None else n_tasks snake_case_ : Union[str, Any] = n_copies def __iter__(self ): snake_case_ : Optional[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() ) snake_case_ : Dict = self.tokenizer(__a , padding=__a , return_tensors="""pt""" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class __lowercase ( _UpperCamelCase): """simple docstring""" def __init__(self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Union[str, Any] = start_length snake_case_ : Union[str, Any] = eof_strings snake_case_ : Tuple = tokenizer def __call__(self , lowercase__ , lowercase__ , **lowercase__ ): snake_case_ : List[str] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) snake_case_ : Any = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__a ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : List[Any] = re.split("""(%s)""" % """|""".join(lowercase_ ) , lowercase_ ) # last string should be "" return "".join(string_list[:-2] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any]=2_0 , **SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : List[Any] = defaultdict(lowercase_ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(lowercase_ ) ): with torch.no_grad(): snake_case_ : Optional[int] = batch["ids"].shape[-1] snake_case_ : int = accelerator.unwrap_model(lowercase_ ).generate( input_ids=batch["""ids"""][:, : batch["""input_len"""]] , num_return_sequences=lowercase_ , **lowercase_ ) # each task is generated batch_size times snake_case_ : Optional[Any] = batch["task_id"].repeat(lowercase_ ) snake_case_ : Any = accelerator.pad_across_processes( lowercase_ , dim=1 , pad_index=tokenizer.pad_token_id ) snake_case_ : Dict = accelerator.gather((generated_tokens, generated_tasks) ) snake_case_ : Any = generated_tokens.cpu().numpy() snake_case_ : Optional[int] = generated_tasks.cpu().numpy() for task, generated_tokens in zip(lowercase_ , lowercase_ ): gen_token_dict[task].append(lowercase_ ) snake_case_ : Tuple = [[] for _ in range(lowercase_ )] for task, generated_tokens in 
gen_token_dict.items(): for s in generated_tokens: snake_case_ : Optional[int] = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ ) code_gens[task].append(remove_last_block(lowercase_ ) ) return code_gens def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Union[str, Any] = HfArgumentParser(lowercase_ ) snake_case_ : str = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric snake_case_ : str = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing snake_case_ : int = "false" if args.num_workers is None: snake_case_ : Optional[Any] = multiprocessing.cpu_count() # Use dataset load to feed to accelerate snake_case_ : Optional[Any] = Accelerator() set_seed(args.seed , device_specific=lowercase_ ) # Load model and tokenizer snake_case_ : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt ) snake_case_ : List[Any] = tokenizer.eos_token snake_case_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings snake_case_ : Optional[int] = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , lowercase_ , lowercase_ )] ), } # Load evaluation dataset and metric snake_case_ : List[Any] = load_dataset("""openai_humaneval""" ) snake_case_ : int = load_metric("""code_eval""" ) snake_case_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval["""test"""] ) snake_case_ : List[str] = args.n_samples // args.batch_size snake_case_ : Tuple = TokenizedDataset(lowercase_ , human_eval["""test"""] , n_copies=lowercase_ , n_tasks=lowercase_ ) # do not confuse args.batch_size, which is actually the num_return_sequences snake_case_ : List[Any] = DataLoader(lowercase_ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: snake_case_ : Union[str, Any] = code_eval_metric.compute(references=[""""""] , predictions=[[""""""]] ) except ValueError as exception: print( """Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`""" """ flag to enable code evaluation.""" ) raise exception snake_case_ : Tuple = accelerator.prepare(lowercase_ , lowercase_ ) snake_case_ : Union[str, Any] = complete_code( lowercase_ , lowercase_ , lowercase_ , lowercase_ , n_tasks=lowercase_ , batch_size=args.batch_size , **lowercase_ , ) if accelerator.is_main_process: snake_case_ : Union[str, Any] = [] for task in tqdm(range(lowercase_ ) ): snake_case_ : List[Any] = human_eval["test"][task]["test"] snake_case_ : List[str] = f'check({human_eval["test"][task]["entry_point"]})' references.append("""\n""" + test_func + """\n""" + entry_point ) # Evaluate completions with "code_eval" metric snake_case_ : List[str] = code_eval_metric.compute( references=lowercase_ , predictions=lowercase_ , num_workers=args.num_workers ) print(f'Results: {pass_at_k}' ) # Save results to json file with open(args.output_file , """w""" ) as fp: json.dump(lowercase_ , lowercase_ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
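A small, self-contained illustration of the remove_last_block post-processing used above: a generation is split on the EOF markers and everything from the last marker onward is dropped. The sample completion is illustrative; EOF_STRINGS mirrors the module-level constant:

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
completion = "    return a + b\nprint(add(1, 2))"
parts = re.split("(%s)" % "|".join(EOF_STRINGS), completion)
print("".join(parts[:-2]))  # -> "    return a + b"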
706
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Union[str, Any] = ["""image_processor""", """tokenizer"""] _A : str = """ChineseCLIPImageProcessor""" _A : Tuple = ("""BertTokenizer""", """BertTokenizerFast""") def __init__(self , lowercase__=None , lowercase__=None , **lowercase__ ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , lowercase__ , ) snake_case_ : Optional[Any] = kwargs.pop("""feature_extractor""" ) snake_case_ : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(lowercase__ , lowercase__ ) snake_case_ : Union[str, Any] = self.image_processor def __call__(self , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ ): if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: snake_case_ : Any = self.tokenizer(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if images is not None: snake_case_ : Tuple = self.image_processor(lowercase__ , return_tensors=lowercase__ , **lowercase__ ) if text is not None and images is not None: snake_case_ : List[Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase__ ) , tensor_type=lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def __UpperCamelCase (self , *lowercase__ , **lowercase__ ): return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @property def __UpperCamelCase (self ): snake_case_ : Optional[int] = self.tokenizer.model_input_names snake_case_ : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def __UpperCamelCase (self ): warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowercase__ , ) return self.image_processor_class
48
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowercase ( unittest.TestCase): """simple docstring""" @property def __UpperCamelCase (self ): torch.manual_seed(0 ) snake_case_ : Dict = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.dummy_uncond_unet snake_case_ : int = ScoreSdeVeScheduler() snake_case_ : Dict = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case ) sde_ve.to(_snake_case ) sde_ve.set_progress_bar_config(disable=_snake_case ) snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_snake_case ).images snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : str = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=_snake_case , return_dict=_snake_case )[ 0 ] snake_case_ : int = image[0, -3:, -3:, -1] snake_case_ : str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) snake_case_ : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self ): snake_case_ : List[Any] = """google/ncsnpp-church-256""" snake_case_ : Optional[Any] = UNetaDModel.from_pretrained(_snake_case ) snake_case_ : int = ScoreSdeVeScheduler.from_pretrained(_snake_case ) snake_case_ : Optional[int] = ScoreSdeVePipeline(unet=_snake_case , scheduler=_snake_case ) sde_ve.to(_snake_case ) sde_ve.set_progress_bar_config(disable=_snake_case ) snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Optional[Any] = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=_snake_case ).images snake_case_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) snake_case_ : Optional[Any] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
707
"""simple docstring""" import argparse import copy def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" snake_case_ : List[Any] = {} with open(SCREAMING_SNAKE_CASE__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: snake_case_ : int = [] _list.append([line.split()[1], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: snake_case_ : str = [] _list.append([line.split()[0], line.split()[2]] ) snake_case_ : Optional[Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" with open(SCREAMING_SNAKE_CASE__ ) as f: snake_case_ : Optional[Any] = f.read(1 ) snake_case_ : Union[str, Any] = start_node snake_case_ : Dict = [] snake_case_ : Union[str, Any] = start_node snake_case_ : Tuple = 0 while visiting not in first_solution: snake_case_ : int = 1_0_0_0_0 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(SCREAMING_SNAKE_CASE__ ) and k[0] not in first_solution: snake_case_ : Union[str, Any] = k[1] snake_case_ : Any = k[0] first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__ ) snake_case_ : List[str] = best_node first_solution.append(SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 snake_case_ : int = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0_0_0_0 ) return first_solution, distance_of_first_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Union[str, Any] = [] for n in solution[1:-1]: snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__ ) for kn in solution[1:-1]: snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__ ) if n == kn: continue snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = kn snake_case_ : Dict = n snake_case_ : Optional[int] = 0 for k in _tmp[:-1]: snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: snake_case_ : Dict = distance + int(i[1] ) _tmp.append(SCREAMING_SNAKE_CASE__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) snake_case_ : Optional[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : Dict = 1 snake_case_ : List[Any] = first_solution snake_case_ : List[Any] = [] snake_case_ : Optional[Any] = distance_of_first_solution snake_case_ : Dict = solution while count <= iters: snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : List[Any] = 0 snake_case_ : List[Any] = neighborhood[index_of_best_solution] snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ ) - 1 snake_case_ : List[str] = False while 
not found: snake_case_ : Tuple = 0 while i < len(SCREAMING_SNAKE_CASE__ ): if best_solution[i] != solution[i]: snake_case_ : Optional[Any] = best_solution[i] snake_case_ : int = solution[i] break snake_case_ : List[str] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) snake_case_ : Tuple = True snake_case_ : Dict = best_solution[:-1] snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: snake_case_ : Tuple = cost snake_case_ : Union[str, Any] = solution else: snake_case_ : str = index_of_best_solution + 1 snake_case_ : Tuple = neighborhood[index_of_best_solution] if len(SCREAMING_SNAKE_CASE__ ) >= size: tabu_list.pop(0 ) snake_case_ : List[str] = count + 1 return best_solution_ever, best_cost def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ): """simple docstring""" snake_case_ : Tuple = generate_neighbours(args.File ) snake_case_ , snake_case_ : Optional[Any] = generate_first_solution( args.File , SCREAMING_SNAKE_CASE__ ) snake_case_ , snake_case_ : Dict = tabu_search( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , args.Iterations , args.Size , ) print(f'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": a_ = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
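The tabu mechanic in the search loop above reduces to a bounded FIFO of forbidden swaps: a move (in either direction) is skipped while it is on the list, and the oldest entry is evicted once the list exceeds its size. A self-contained sketch with illustrative moves and list size, not values taken from the script:

tabu_list, size = [], 3
for move in [["a", "b"], ["b", "c"], ["a", "b"], ["c", "d"], ["d", "e"]]:
    if move in tabu_list or list(reversed(move)) in tabu_list:
        continue  # this exchange is currently tabu
    tabu_list.append(move)
    if len(tabu_list) > size:
        tabu_list.pop(0)  # the oldest move becomes legal again
print(tabu_list)  # [['b', 'c'], ['c', 'd'], ['d', 'e']]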
48
0
"""simple docstring""" import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand a_ = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) a_ = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) a_ = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", False), ("""AS 3S 4S 8S 2S""", True), 
) a_ = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) a_ = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]), ("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) a_ = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) a_ = ( ("""JH AH TH KH QH""", 23), ("""JH 9H TH KH QH""", 22), ("""JC KH JS JD JH""", 21), ("""KH KC 3S 3H 3D""", 20), ("""8C 9C 5C 3C TC""", 19), ("""JS QS 9H TS KH""", 18), ("""7C 7S KH 2H 7H""", 17), ("""3C KH 5D 5S KH""", 16), ("""QH 8H KD JH 8S""", 15), ("""2D 6D 9D TH 7D""", 14), ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ , snake_case_ : Any = randrange(len(SCREAMING_SNAKE_CASE__ ) ), randrange(len(SCREAMING_SNAKE_CASE__ ) ) snake_case_ : Tuple = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] snake_case_ , snake_case_ : Dict = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_0 ): """simple docstring""" return (generate_random_hand() for _ in range(SCREAMING_SNAKE_CASE__ )) @pytest.mark.parametrize("""hand, expected""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Dict = PokerHand(SCREAMING_SNAKE_CASE__ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ ).compare_with(PokerHand(SCREAMING_SNAKE_CASE__ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" assert PokerHand(SCREAMING_SNAKE_CASE__ 
).compare_with(PokerHand(SCREAMING_SNAKE_CASE__ ) ) == expected def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = [PokerHand(SCREAMING_SNAKE_CASE__ ) for hand in SORTED_HANDS] snake_case_ : Dict = poker_hands.copy() shuffle(SCREAMING_SNAKE_CASE__ ) snake_case_ : int = chain(sorted(SCREAMING_SNAKE_CASE__ ) ) for index, hand in enumerate(SCREAMING_SNAKE_CASE__ ): assert hand == poker_hands[index] def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[Any] = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=SCREAMING_SNAKE_CASE__ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Tuple = PokerHand("""2C 4S AS 3D 5C""" ) snake_case_ : int = True snake_case_ : Tuple = [5, 4, 3, 2, 1_4] for _ in range(1_0 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Any = 0 snake_case_ : int = os.path.abspath(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ) snake_case_ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """poker_hands.txt""" ) with open(SCREAMING_SNAKE_CASE__ ) as file_hand: for line in file_hand: snake_case_ : Dict = line[:1_4].strip() snake_case_ : Any = line[1_5:].strip() snake_case_ , snake_case_ : int = PokerHand(SCREAMING_SNAKE_CASE__ ), PokerHand(SCREAMING_SNAKE_CASE__ ) snake_case_ : Union[str, Any] = player.compare_with(SCREAMING_SNAKE_CASE__ ) if output == "Win": answer += 1 assert answer == 3_7_6
708
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings a_ = r''' [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `" / "`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `" // "`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `"wiki_dpr"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `"train"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `"compressed"`) The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a "dummy" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
''' @add_start_docstrings(_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """rag""" _A : Optional[Any] = True def __init__(self , lowercase__=None , lowercase__=True , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=" / " , lowercase__=" // " , lowercase__=5 , lowercase__=3_00 , lowercase__=7_68 , lowercase__=8 , lowercase__="wiki_dpr" , lowercase__="train" , lowercase__="compressed" , lowercase__=None , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=0.0 , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=None , **lowercase__ , ): super().__init__( bos_token_id=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , forced_eos_token_id=lowercase__ , is_encoder_decoder=lowercase__ , prefix=lowercase__ , vocab_size=lowercase__ , **lowercase__ , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" snake_case_ : List[Any] = kwargs.pop("""question_encoder""" ) snake_case_ : Tuple = question_encoder_config.pop("""model_type""" ) snake_case_ : List[str] = kwargs.pop("""generator""" ) snake_case_ : List[str] = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig snake_case_ : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ ) snake_case_ : int = reduce_loss snake_case_ : Optional[int] = label_smoothing snake_case_ : Dict = exclude_bos_score snake_case_ : Union[str, Any] = do_marginalize snake_case_ : Union[str, Any] = title_sep snake_case_ : int = doc_sep snake_case_ : int = n_docs snake_case_ : List[str] = max_combined_length snake_case_ : Tuple = dataset snake_case_ : int = dataset_split snake_case_ : str = index_name snake_case_ : List[str] = retrieval_vector_size snake_case_ : Dict = retrieval_batch_size snake_case_ : str = passages_path snake_case_ : Union[str, Any] = index_path snake_case_ : Tuple = use_dummy_dataset snake_case_ : Dict = output_retrieved snake_case_ : str = do_deduplication snake_case_ : Any = use_cache if self.forced_eos_token_id is None: snake_case_ : Any = getattr(self.generator , """forced_eos_token_id""" , lowercase__ ) @classmethod def __UpperCamelCase (cls , lowercase__ , lowercase__ , **lowercase__ ): return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) snake_case_ : Any = self.question_encoder.to_dict() snake_case_ : Dict = self.generator.to_dict() snake_case_ : Union[str, Any] = self.__class__.model_type return output
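For context, a hedged sketch of how a composite RAG config like the class above is typically assembled through the public transformers API. The checkpoint names are illustrative and from_pretrained fetches the sub-configs from the Hub:

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")
rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)
print(rag_config.n_docs, rag_config.generator.model_type)  # 5 bart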
48
0
"""simple docstring""" import argparse import os import re import packaging.version a_ = '''examples/''' a_ = { '''examples''': (re.compile(r'''^check_min_version\(\"[^\"]+\"\)\s*$''', re.MULTILINE), '''check_min_version(\"VERSION\")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+\"([^\"]+)\"\s*$''', re.MULTILINE), '''__version__ = \"VERSION\"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*\"[^\"]+\",''', re.MULTILINE), r'''\1version=\"VERSION\",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*\"[^\"]+\"$''', re.MULTILINE), '''release = \"VERSION\"\n'''), } a_ = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } a_ = '''README.md''' def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" with open(snake_case_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : Optional[Any] = f.read() snake_case_ : Tuple = REPLACE_PATTERNS[pattern] snake_case_ : Union[str, Any] = replace.replace("""VERSION""" , snake_case_ ) snake_case_ : Dict = re_pattern.sub(snake_case_ , snake_case_ ) with open(snake_case_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" for folder, directories, fnames in os.walk(snake_case_ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(snake_case_ , snake_case_ ) , snake_case_ , pattern="""examples""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case_ , snake_case_ , snake_case_ ) if not patch: update_version_in_examples(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : Any = """🤗 Transformers currently provides the following architectures""" snake_case_ : int = """1. Want to contribute a new model?""" with open(snake_case_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : Union[str, Any] = f.readlines() # Find the start of the list. snake_case_ : Dict = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 snake_case_ : Any = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): snake_case_ : Optional[int] = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(snake_case_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: snake_case_ : Dict = f.read() snake_case_ : str = REPLACE_PATTERNS["""init"""][0].search(snake_case_ ).groups()[0] return packaging.version.parse(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any]=False ): """simple docstring""" snake_case_ : Dict = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: snake_case_ : Optional[Any] = default_version.base_version elif patch: snake_case_ : Any = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: snake_case_ : str = f'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. snake_case_ : Dict = input(f'Which version are you releasing? [{default_version}]' ) if len(snake_case_ ) == 0: snake_case_ : List[str] = default_version print(f'Updating version to {version}.' ) global_version_update(snake_case_ , patch=snake_case_ ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : List[str] = get_version() snake_case_ : Union[str, Any] = f'{current_version.major}.{current_version.minor + 1}.0.dev0' snake_case_ : List[str] = current_version.base_version # Check with the user we got that right. snake_case_ : Union[str, Any] = input(f'Which version are we developing now? [{dev_version}]' ) if len(snake_case_ ) == 0: snake_case_ : Union[str, Any] = dev_version print(f'Updating version to {version}.' ) global_version_update(snake_case_ ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') a_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
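A minimal illustration of how the (pattern, replacement) pairs defined above are applied, shown on an in-memory string instead of a file on disk (the version strings are illustrative):

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replace = '__version__ = "VERSION"\n'
src = '__version__ = "4.31.0.dev0"\n'
print(pattern.sub(replace.replace("VERSION", "4.31.0"), src), end="")
# -> __version__ = "4.31.0"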
709
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Optional[int] = """upernet""" def __init__(self , lowercase__=None , lowercase__=5_12 , lowercase__=0.02 , lowercase__=[1, 2, 3, 6] , lowercase__=True , lowercase__=0.4 , lowercase__=3_84 , lowercase__=2_56 , lowercase__=1 , lowercase__=False , lowercase__=2_55 , **lowercase__ , ): super().__init__(**lowercase__ ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) snake_case_ : List[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(lowercase__ , lowercase__ ): snake_case_ : Tuple = backbone_config.get("""model_type""" ) snake_case_ : List[str] = CONFIG_MAPPING[backbone_model_type] snake_case_ : List[Any] = config_class.from_dict(lowercase__ ) snake_case_ : List[Any] = backbone_config snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = initializer_range snake_case_ : str = pool_scales snake_case_ : Dict = use_auxiliary_head snake_case_ : str = auxiliary_loss_weight snake_case_ : List[str] = auxiliary_in_channels snake_case_ : Optional[Any] = auxiliary_channels snake_case_ : Any = auxiliary_num_convs snake_case_ : List[Any] = auxiliary_concat_input snake_case_ : List[str] = loss_ignore_index def __UpperCamelCase (self ): snake_case_ : Dict = copy.deepcopy(self.__dict__ ) snake_case_ : Union[str, Any] = self.backbone_config.to_dict() snake_case_ : Any = self.__class__.model_type return output
48
0
"""simple docstring""" import argparse import os from pathlib import Path import torch from bark.generation import _load_model as _bark_load_model from huggingface_hub import hf_hub_download from transformers import EncodecConfig, EncodecModel, set_seed from transformers.models.bark.configuration_bark import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) set_seed(770) a_ = { 'c_attn': 'att_proj', 'c_proj': 'out_proj', 'c_fc': 'in_proj', 'transformer.': '', 'h.': 'layers.', 'ln_1': 'layernorm_1', 'ln_2': 'layernorm_2', 'ln_f': 'layernorm_final', 'wpe': 'position_embeds_layer', 'wte': 'input_embeds_layer', } a_ = { 'text_small': { 'repo_id': 'suno/bark', 'file_name': 'text.pt', }, 'coarse_small': { 'repo_id': 'suno/bark', 'file_name': 'coarse.pt', }, 'fine_small': { 'repo_id': 'suno/bark', 'file_name': 'fine.pt', }, 'text': { 'repo_id': 'suno/bark', 'file_name': 'text_2.pt', }, 'coarse': { 'repo_id': 'suno/bark', 'file_name': 'coarse_2.pt', }, 'fine': { 'repo_id': 'suno/bark', 'file_name': 'fine_2.pt', }, } a_ = os.path.dirname(os.path.abspath(__file__)) a_ = os.path.join(os.path.expanduser('''~'''), '''.cache''') a_ = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''') def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ): """simple docstring""" snake_case_ : Optional[Any] = model_type if use_small: key += "_small" return os.path.join(__UpperCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) hf_hub_download(repo_id=__UpperCamelCase , filename=__UpperCamelCase , local_dir=__UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=False , SCREAMING_SNAKE_CASE__ : Optional[int]="text" ): """simple docstring""" if model_type == "text": snake_case_ : Optional[int] = BarkSemanticModel snake_case_ : str = BarkSemanticConfig snake_case_ : int = BarkSemanticGenerationConfig elif model_type == "coarse": snake_case_ : Optional[int] = BarkCoarseModel snake_case_ : Tuple = BarkCoarseConfig snake_case_ : Optional[int] = BarkCoarseGenerationConfig elif model_type == "fine": snake_case_ : str = BarkFineModel snake_case_ : List[Any] = BarkFineConfig snake_case_ : Optional[Any] = BarkFineGenerationConfig else: raise NotImplementedError() snake_case_ : Optional[Any] = f'{model_type}_small' if use_small else model_type snake_case_ : str = REMOTE_MODEL_PATHS[model_key] if not os.path.exists(__UpperCamelCase ): logger.info(f'{model_type} model not found, downloading into `{CACHE_DIR}`.' 
) _download(model_info["""repo_id"""] , model_info["""file_name"""] ) snake_case_ : int = torch.load(__UpperCamelCase , map_location=__UpperCamelCase ) # this is a hack snake_case_ : Union[str, Any] = checkpoint['''model_args'''] if "input_vocab_size" not in model_args: snake_case_ : Any = model_args['''vocab_size'''] snake_case_ : int = model_args['''vocab_size'''] del model_args["vocab_size"] # convert Bark model arguments to HF Bark model arguments snake_case_ : Optional[Any] = model_args.pop("""n_head""" ) snake_case_ : Tuple = model_args.pop("""n_embd""" ) snake_case_ : Tuple = model_args.pop("""n_layer""" ) snake_case_ : Any = ConfigClass(**checkpoint["""model_args"""] ) snake_case_ : Tuple = ModelClass(config=__UpperCamelCase ) snake_case_ : Any = GenerationConfigClass() snake_case_ : Optional[int] = model_generation_config snake_case_ : Union[str, Any] = checkpoint['''model'''] # fixup checkpoint snake_case_ : List[str] = '''_orig_mod.''' for k, v in list(state_dict.items() ): if k.startswith(__UpperCamelCase ): # replace part of the key with corresponding layer name in HF implementation snake_case_ : List[Any] = k[len(__UpperCamelCase ) :] for old_layer_name in new_layer_name_dict: snake_case_ : List[Any] = new_k.replace(__UpperCamelCase , new_layer_name_dict[old_layer_name] ) snake_case_ : Optional[int] = state_dict.pop(__UpperCamelCase ) snake_case_ : List[str] = set(state_dict.keys() ) - set(model.state_dict().keys() ) snake_case_ : Optional[int] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )} snake_case_ : Optional[int] = set(model.state_dict().keys() ) - set(state_dict.keys() ) snake_case_ : Any = {k for k in missing_keys if not k.endswith(""".attn.bias""" )} if len(__UpperCamelCase ) != 0: raise ValueError(f'extra keys found: {extra_keys}' ) if len(__UpperCamelCase ) != 0: raise ValueError(f'missing keys: {missing_keys}' ) model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase ) snake_case_ : Optional[Any] = model.num_parameters(exclude_embeddings=__UpperCamelCase ) snake_case_ : str = checkpoint['''best_val_loss'''].item() logger.info(f'model loaded: {round(n_params/1E6 , 1 )}M params, {round(__UpperCamelCase , 3 )} loss' ) model.eval() model.to(__UpperCamelCase ) del checkpoint, state_dict return model def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple="text" ): """simple docstring""" if model_type not in ("text", "coarse", "fine"): raise NotImplementedError() snake_case_ : str = '''cpu''' # do conversion on cpu snake_case_ : int = _get_ckpt_path(__UpperCamelCase , use_small=__UpperCamelCase ) snake_case_ : Optional[int] = _load_model(__UpperCamelCase , __UpperCamelCase , model_type=__UpperCamelCase , use_small=__UpperCamelCase ) # load bark initial model snake_case_ : List[Any] = _bark_load_model(__UpperCamelCase , """cpu""" , model_type=__UpperCamelCase , use_small=__UpperCamelCase ) if model_type == "text": snake_case_ : Optional[int] = bark_model['''model'''] if model.num_parameters(exclude_embeddings=__UpperCamelCase ) != bark_model.get_num_params(): raise ValueError("""initial and new models don\'t have the same number of parameters""" ) # check if same output as the bark model snake_case_ : Optional[Any] = 5 snake_case_ : Dict = 1_0 if model_type in ["text", "coarse"]: snake_case_ : Tuple = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int ) snake_case_ : Tuple = bark_model(__UpperCamelCase )[0] snake_case_ : int = 
model(__UpperCamelCase ) # take last logits snake_case_ : int = output_new_model_total.logits[:, [-1], :] else: snake_case_ : str = 3 snake_case_ : List[str] = 8 snake_case_ : List[Any] = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int ) snake_case_ : List[Any] = model(__UpperCamelCase , __UpperCamelCase ) snake_case_ : Union[str, Any] = bark_model(__UpperCamelCase , __UpperCamelCase ) snake_case_ : Optional[Any] = output_new_model_total.logits # output difference should come from the difference of self-attention implementation design if output_new_model.shape != output_old_model.shape: raise ValueError("""initial and new outputs don\'t have the same shape""" ) if (output_new_model - output_old_model).abs().max().item() > 1E-3: raise ValueError("""initial and new outputs are not equal""" ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , ): """simple docstring""" snake_case_ : int = os.path.join(__UpperCamelCase , __UpperCamelCase ) snake_case_ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(__UpperCamelCase , """config.json""" ) ) snake_case_ : Optional[Any] = BarkCoarseConfig.from_pretrained(os.path.join(__UpperCamelCase , """config.json""" ) ) snake_case_ : str = BarkFineConfig.from_pretrained(os.path.join(__UpperCamelCase , """config.json""" ) ) snake_case_ : int = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" ) snake_case_ : Dict = BarkSemanticModel.from_pretrained(__UpperCamelCase ) snake_case_ : str = BarkCoarseModel.from_pretrained(__UpperCamelCase ) snake_case_ : Optional[int] = BarkFineModel.from_pretrained(__UpperCamelCase ) snake_case_ : int = EncodecModel.from_pretrained("""facebook/encodec_24khz""" ) snake_case_ : str = BarkConfig.from_sub_model_configs( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) snake_case_ : Any = BarkGenerationConfig.from_sub_model_configs( semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config ) snake_case_ : List[Any] = BarkModel(__UpperCamelCase ) snake_case_ : List[Any] = semantic snake_case_ : Tuple = coarseAcoustic snake_case_ : Union[str, Any] = fineAcoustic snake_case_ : Any = codec snake_case_ : Union[str, Any] = bark_generation_config Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) bark.save_pretrained(__UpperCamelCase , repo_id=__UpperCamelCase , push_to_hub=__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''') parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--is_small''', action='''store_true''', help='''convert the small version instead of the large.''') a_ = parser.parse_args() load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
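The heart of the conversion is the checkpoint-key rewriting: strip the compiled-model prefix `_orig_mod.`, then rename GPT-style layer names to their HF equivalents. A self-contained sketch of that pattern on a toy dict (the dict contents are made up for illustration):

# Self-contained sketch of the key-rewriting pattern used above;
# values and keys are illustrative.
new_layer_name_dict = {"c_attn": "att_proj", "h.": "layers.", "wte": "input_embeds_layer"}
state_dict = {
    "_orig_mod.transformer.h.0.attn.c_attn.weight": 0,
    "_orig_mod.wte.weight": 1,
}

unwanted_prefix = "_orig_mod."
for k in list(state_dict):
    if k.startswith(unwanted_prefix):
        new_k = k[len(unwanted_prefix):]
        for old in new_layer_name_dict:
            new_k = new_k.replace(old, new_layer_name_dict[old])
        state_dict[new_k] = state_dict.pop(k)

print(sorted(state_dict))
# ['input_embeds_layer.weight', 'transformer.layers.0.attn.att_proj.weight']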
710
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask a_ = logging.getLogger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self , lowercase__=-1 ): # in NER datasets, the last column is usually reserved for NER label snake_case_ : Union[str, Any] = label_idx def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[str] = mode.value snake_case_ : List[Any] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : Any = [] with open(lowercase__ , encoding="""utf-8""" ) as f: snake_case_ : str = [] snake_case_ : List[Any] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) guid_index += 1 snake_case_ : Optional[Any] = [] snake_case_ : int = [] else: snake_case_ : Optional[Any] = line.split(""" """ ) words.append(splits[0] ) if len(lowercase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , labels=lowercase__ ) ) return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(lowercase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Optional[int] = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(lowercase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Dict = f.read().splitlines() if "O" not in labels: snake_case_ : List[Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __init__(self ): # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: snake_case_ : Any = f.read().splitlines() if "O" not in labels: snake_case_ : Tuple = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowercase ( _UpperCAmelCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ , lowercase__ ): if isinstance(lowercase__ , lowercase__ ): snake_case_ : List[Any] = mode.value snake_case_ : Optional[int] = os.path.join(lowercase__ , f'{mode}.txt' ) snake_case_ : Tuple = 1 snake_case_ : str = [] with open(lowercase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(lowercase__ ): snake_case_ : Tuple = [] snake_case_ : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(lowercase__ ) == len(lowercase__ ) if words: examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=lowercase__ , 
labels=lowercase__ ) ) guid_index += 1 return examples def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = 0 for sentence in parse_incr(lowercase__ ): snake_case_ : int = preds_list[example_id] snake_case_ : Dict = """""" for token in sentence: out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ' out += "\n" writer.write(lowercase__ ) example_id += 1 def __UpperCamelCase (self , lowercase__ ): if path: with open(lowercase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
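The read loop above implements standard CoNLL-style sentence splitting: a blank line or a `-DOCSTART-` marker terminates the current sentence. A self-contained sketch of that parsing logic on an in-memory sample:

# Self-contained sketch of the CoNLL-style sentence splitting used above.
sample = """-DOCSTART- -X- -X- O

EU NNP B-NP B-ORG
rejects VBZ B-VP O

German JJ B-NP B-MISC
"""

sentences, words = [], []
for line in sample.splitlines(keepends=True):
    if line.startswith("-DOCSTART-") or line in ("", "\n"):
        if words:
            sentences.append(words)
            words = []
    else:
        words.append(line.split(" ")[0])
if words:
    sentences.append(words)

print(sentences)  # [['EU', 'rejects'], ['German']]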
48
0
"""simple docstring""" import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __lowercase ( unittest.TestCase): """simple docstring""" _A : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING _A : List[str] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : Dict = AudioClassificationPipeline(model=_lowerCamelCase , feature_extractor=_lowerCamelCase ) # test with a raw waveform snake_case_ : Tuple = np.zeros((3_40_00,) ) snake_case_ : List[Any] = np.zeros((1_40_00,) ) return audio_classifier, [audioa, audio] def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ , snake_case_ : Any = examples snake_case_ : Dict = audio_classifier(_lowerCamelCase ) # by default a model is initialized with num_labels=2 self.assertEqual( _lowerCamelCase , [ {"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )}, {"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )}, ] , ) snake_case_ : Optional[Any] = audio_classifier(_lowerCamelCase , top_k=1 ) self.assertEqual( _lowerCamelCase , [ {"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )}, ] , ) self.run_torchaudio(_lowerCamelCase ) @require_torchaudio def __UpperCamelCase (self , lowercase__ ): import datasets # test with a local file snake_case_ : Tuple = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) snake_case_ : str = dataset[0]["""audio"""]["""array"""] snake_case_ : Optional[Any] = audio_classifier(_lowerCamelCase ) self.assertEqual( _lowerCamelCase , [ {"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )}, {"""score""": ANY(_lowerCamelCase ), """label""": ANY(_lowerCamelCase )}, ] , ) @require_torch def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """anton-l/wav2vec2-random-tiny-classifier""" snake_case_ : Optional[Any] = pipeline("""audio-classification""" , model=_lowerCamelCase ) snake_case_ : Dict = np.ones((80_00,) ) snake_case_ : int = audio_classifier(_lowerCamelCase , top_k=4 ) snake_case_ : Tuple = [ {"""score""": 0.0842, """label""": """no"""}, {"""score""": 0.0838, """label""": """up"""}, {"""score""": 0.0837, """label""": """go"""}, {"""score""": 0.0834, """label""": """right"""}, ] snake_case_ : List[Any] = [ {"""score""": 0.0845, """label""": """stop"""}, {"""score""": 0.0844, """label""": """on"""}, {"""score""": 0.0841, """label""": """right"""}, {"""score""": 0.0834, """label""": """left"""}, ] self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) snake_case_ : str = {"""array""": np.ones((80_00,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} snake_case_ : Optional[int] = audio_classifier(_lowerCamelCase , top_k=4 ) self.assertIn(nested_simplify(_lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __UpperCamelCase (self ): import datasets snake_case_ : Optional[int] = """superb/wav2vec2-base-superb-ks""" snake_case_ : Optional[Any] = pipeline("""audio-classification""" , model=_lowerCamelCase ) snake_case_ : Tuple = 
datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) snake_case_ : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa ) snake_case_ : Tuple = audio_classifier(_lowerCamelCase , top_k=4 ) self.assertEqual( nested_simplify(_lowerCamelCase , decimals=3 ) , [ {"""score""": 0.981, """label""": """go"""}, {"""score""": 0.007, """label""": """up"""}, {"""score""": 0.006, """label""": """_unknown_"""}, {"""score""": 0.001, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def __UpperCamelCase (self ): pass
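Outside the test harness, the same pipeline accepts either a bare NumPy waveform or a dict carrying an explicit sampling rate. A minimal sketch, using the tiny checkpoint the test above also references:

# Minimal sketch of the audio-classification pipeline the tests exercise.
import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")

# Either a bare waveform (assumed to be at the model's sampling rate) ...
waveform = np.zeros((8000,), dtype=np.float32)
print(classifier(waveform, top_k=2))

# ... or a dict that states the sampling rate explicitly.
print(classifier({"array": waveform, "sampling_rate": 16000}, top_k=2))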
711
"""simple docstring""" import random def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Union[str, Any] = num - 1 snake_case_ : List[str] = 0 while s % 2 == 0: snake_case_ : str = s // 2 t += 1 for _ in range(5 ): snake_case_ : List[Any] = random.randrange(2 , num - 1 ) snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if v != 1: snake_case_ : int = 0 while v != (num - 1): if i == t - 1: return False else: snake_case_ : str = i + 1 snake_case_ : int = (v**2) % num return True def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" if num < 2: return False snake_case_ : Dict = [ 2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1, 7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3, 1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1, 2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9, 4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1, 5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9, 6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9, 8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1, 9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ): """simple docstring""" while True: snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(SCREAMING_SNAKE_CASE__ ): return num if __name__ == "__main__": a_ = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
48
0
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np a_ = re.compile(r'''\b(a|an|the)\b''', re.UNICODE) a_ = None def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" snake_case_ : int = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=__snake_case , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__snake_case , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: snake_case_ : Optional[Any] = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" def remove_articles(SCREAMING_SNAKE_CASE__ : List[Any] ): return ARTICLES_REGEX.sub(""" """ , __snake_case ) def white_space_fix(SCREAMING_SNAKE_CASE__ : int ): return " ".join(text.split() ) def remove_punc(SCREAMING_SNAKE_CASE__ : List[str] ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(SCREAMING_SNAKE_CASE__ : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" if not s: return [] return normalize_answer(__snake_case ).split() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" snake_case_ : int = get_tokens(__snake_case ) snake_case_ : int = get_tokens(__snake_case ) snake_case_ : int = collections.Counter(__snake_case ) & collections.Counter(__snake_case ) snake_case_ : Optional[int] = sum(common.values() ) if len(__snake_case ) == 0 or len(__snake_case ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 snake_case_ : Union[str, Any] = 1.0 * num_same / len(__snake_case ) snake_case_ : List[Any] = 1.0 * num_same / len(__snake_case ) snake_case_ : List[Any] = (2 * precision * recall) / (precision + recall) return fa def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple ): """simple docstring""" snake_case_ : Optional[int] = {} snake_case_ : int = {} for article in dataset: for p in article["paragraphs"]: for qa in 
p["qas"]: snake_case_ : str = qa["id"] snake_case_ : Optional[int] = [t for t in qa["answers"]["text"] if normalize_answer(__snake_case )] if not gold_answers: # For unanswerable questions, only correct answer is empty string snake_case_ : Optional[int] = [""] if qid not in preds: print(f'Missing prediction for {qid}' ) continue snake_case_ : Optional[Any] = preds[qid] # Take max over all gold answers snake_case_ : Dict = max(compute_exact(__snake_case , __snake_case ) for a in gold_answers ) snake_case_ : int = max(compute_fa(__snake_case , __snake_case ) for a in gold_answers ) return exact_scores, fa_scores def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" snake_case_ : Optional[Any] = {} for qid, s in scores.items(): snake_case_ : Any = na_probs[qid] > na_prob_thresh if pred_na: snake_case_ : Optional[Any] = float(not qid_to_has_ans[qid] ) else: snake_case_ : Any = s return new_scores def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ): """simple docstring""" if not qid_list: snake_case_ : str = len(__snake_case ) return collections.OrderedDict( [ ("""exact""", 1_0_0.0 * sum(exact_scores.values() ) / total), ("""f1""", 1_0_0.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: snake_case_ : Tuple = len(__snake_case ) return collections.OrderedDict( [ ("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" for k in new_eval: snake_case_ : Tuple = new_eval[k] def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" plt.step(__snake_case , __snake_case , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(__snake_case , __snake_case , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__snake_case ) plt.savefig(__snake_case ) plt.clf() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Any=None ): """simple docstring""" snake_case_ : Tuple = sorted(__snake_case , key=lambda SCREAMING_SNAKE_CASE__ : na_probs[k] ) snake_case_ : Tuple = 0.0 snake_case_ : List[Any] = 1.0 snake_case_ : Optional[Any] = 0.0 snake_case_ : Optional[Any] = [1.0] snake_case_ : str = [0.0] snake_case_ : int = 0.0 for i, qid in enumerate(__snake_case ): if qid_to_has_ans[qid]: true_pos += scores[qid] snake_case_ : int = true_pos / float(i + 1 ) snake_case_ : int = true_pos / float(__snake_case ) if i == len(__snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__snake_case ) recalls.append(__snake_case ) if out_image: plot_pr_curve(__snake_case , __snake_case , __snake_case , __snake_case ) return {"ap": 1_0_0.0 * avg_prec} 
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple ): """simple docstring""" if out_image_dir and not os.path.exists(__snake_case ): os.makedirs(__snake_case ) snake_case_ : List[Any] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return snake_case_ : Dict = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) snake_case_ : int = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) snake_case_ : Tuple = {k: float(__snake_case ) for k, v in qid_to_has_ans.items()} snake_case_ : Optional[int] = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , ) merge_eval(__snake_case , __snake_case , """pr_exact""" ) merge_eval(__snake_case , __snake_case , """pr_f1""" ) merge_eval(__snake_case , __snake_case , """pr_oracle""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ): """simple docstring""" if not qid_list: return snake_case_ : Optional[Any] = [na_probs[k] for k in qid_list] snake_case_ : Optional[Any] = np.ones_like(__snake_case ) / float(len(__snake_case ) ) plt.hist(__snake_case , weights=__snake_case , bins=2_0 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(f'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(__snake_case , f'na_prob_hist_{name}.png' ) ) plt.clf() def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): """simple docstring""" snake_case_ : int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) snake_case_ : str = num_no_ans snake_case_ : Any = cur_score snake_case_ : int = 0.0 snake_case_ : Any = sorted(__snake_case , key=lambda SCREAMING_SNAKE_CASE__ : na_probs[k] ) for i, qid in enumerate(__snake_case ): if qid not in scores: continue if qid_to_has_ans[qid]: snake_case_ : Any = scores[qid] else: if preds[qid]: snake_case_ : Optional[int] = -1 else: snake_case_ : List[str] = 0 cur_score += diff if cur_score > best_score: snake_case_ : List[str] = cur_score snake_case_ : List[Any] = na_probs[qid] return 1_0_0.0 * best_score / len(__snake_case ), best_thresh def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ : Tuple = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case_ : Dict = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) snake_case_ : Any = best_exact snake_case_ : Tuple = exact_thresh snake_case_ : Any = best_fa snake_case_ : str = fa_thresh 
def SCREAMING_SNAKE_CASE__ ( ): """simple docstring""" with open(OPTS.data_file ) as f: snake_case_ : Any = json.load(__snake_case ) snake_case_ : Any = dataset_json["data"] with open(OPTS.pred_file ) as f: snake_case_ : List[str] = json.load(__snake_case ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: snake_case_ : str = json.load(__snake_case ) else: snake_case_ : Optional[Any] = {k: 0.0 for k in preds} snake_case_ : List[str] = make_qid_to_has_ans(__snake_case ) # maps qid to True/False snake_case_ : Tuple = [k for k, v in qid_to_has_ans.items() if v] snake_case_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v] snake_case_ : Any = get_raw_scores(__snake_case , __snake_case ) snake_case_ : List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) snake_case_ : List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) snake_case_ : List[Any] = make_eval_dict(__snake_case , __snake_case ) if has_ans_qids: snake_case_ : Optional[Any] = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , """HasAns""" ) if no_ans_qids: snake_case_ : Any = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , OPTS.out_image_dir ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(__snake_case , __snake_case ) else: print(json.dumps(__snake_case , indent=2 ) ) if __name__ == "__main__": a_ = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('''Agg''') import matplotlib.pyplot as plt main()
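The token-level F1 above is the harmonic mean of precision and recall over the multiset of shared tokens between gold and prediction. A standalone worked example of that computation:

# Standalone worked example of the token-level F1 used by the script above.
import collections

gold = "the cat sat".split()        # normalized gold tokens
pred = "the black cat".split()      # normalized predicted tokens

common = collections.Counter(gold) & collections.Counter(pred)
num_same = sum(common.values())     # {'the': 1, 'cat': 1} -> 2

precision = num_same / len(pred)    # 2/3
recall = num_same / len(gold)       # 2/3
f1 = 2 * precision * recall / (precision + recall)
print(round(f1, 4))                 # -> 0.6667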
712
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { '''microsoft/deberta-v2-xlarge''': '''https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xxlarge''': '''https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json''', '''microsoft/deberta-v2-xlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json''' ), '''microsoft/deberta-v2-xxlarge-mnli''': ( '''https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json''' ), } class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : Dict = """deberta-v2""" def __init__(self , lowercase__=12_81_00 , lowercase__=15_36 , lowercase__=24 , lowercase__=24 , lowercase__=61_44 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=0 , lowercase__=0.02 , lowercase__=1e-7 , lowercase__=False , lowercase__=-1 , lowercase__=0 , lowercase__=True , lowercase__=None , lowercase__=0 , lowercase__="gelu" , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Union[str, Any] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : Tuple = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = relative_attention snake_case_ : Dict = max_relative_positions snake_case_ : Optional[int] = pad_token_id snake_case_ : List[str] = position_biased_input # Backwards compatibility if type(lowercase__ ) == str: snake_case_ : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split("""|""" )] snake_case_ : Optional[int] = pos_att_type snake_case_ : List[str] = vocab_size snake_case_ : Tuple = layer_norm_eps snake_case_ : List[Any] = kwargs.get("""pooler_hidden_size""" , lowercase__ ) snake_case_ : List[str] = pooler_dropout snake_case_ : int = pooler_hidden_act class __lowercase ( _UpperCAmelCase): """simple docstring""" @property def __UpperCamelCase (self ): if self.task == "multiple-choice": snake_case_ : List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : int = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] ) @property def __UpperCamelCase (self ): return 12 def __UpperCamelCase (self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , lowercase__ = 3 , lowercase__ = 40 , lowercase__ = 40 , lowercase__ = None , ): snake_case_ : str = super().generate_dummy_inputs(preprocessor=lowercase__ , framework=lowercase__ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
48
0
"""simple docstring""" from typing import List from .keymap import KEYMAP, get_character def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" def decorator(SCREAMING_SNAKE_CASE__ : List[str] ): snake_case_ : Optional[int] = getattr(a__ , """handle_key""" , [] ) handle += [key] setattr(a__ , """handle_key""" , a__ ) return func return decorator def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" def decorator(SCREAMING_SNAKE_CASE__ : str ): snake_case_ : List[str] = getattr(a__ , """handle_key""" , [] ) handle += keys setattr(a__ , """handle_key""" , a__ ) return func return decorator class __lowercase ( __lowercase): """simple docstring""" def __new__(cls , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : List[Any] = super().__new__(cls , __A , __A , __A ) if not hasattr(__A , """key_handler""" ): setattr(__A , """key_handler""" , {} ) setattr(__A , """handle_input""" , KeyHandler.handle_input ) for value in attrs.values(): snake_case_ : List[Any] = getattr(__A , """handle_key""" , [] ) for key in handled_keys: snake_case_ : List[Any] = value return new_cls @staticmethod def __UpperCamelCase (cls ): snake_case_ : int = get_character() if char != KEYMAP["undefined"]: snake_case_ : int = ord(__A ) snake_case_ : int = cls.key_handler.get(__A ) if handler: snake_case_ : List[Any] = char return handler(cls ) else: return None def SCREAMING_SNAKE_CASE__ ( cls : int ): """simple docstring""" return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
713
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): """simple docstring""" return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
48
0
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class __lowercase ( UpperCamelCase__): """simple docstring""" _A : Optional[Any] = 42 _A : int = jnp.floataa _A : int = True def __UpperCamelCase (self ): super().setup() snake_case_ : List[Any] = nn.Dense(5 , dtype=self.dtype ) def __call__(self , *lowercase__ , **lowercase__ ): snake_case_ : Any = super().__call__(*__A , **__A ) snake_case_ : Optional[Any] = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class __lowercase ( UpperCamelCase__): """simple docstring""" _A : List[Any] = FlaxBigBirdForNaturalQuestionsModule def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" def cross_entropy(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ): snake_case_ : Optional[Any] = logits.shape[-1] snake_case_ : str = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype("""f4""" ) snake_case_ : Optional[Any] = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 ) snake_case_ : Any = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: snake_case_ : List[Any] = reduction(UpperCamelCase__ ) return loss snake_case_ : Optional[Any] = partial(UpperCamelCase__ , reduction=jnp.mean ) snake_case_ : str = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ : Union[str, Any] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ : Union[str, Any] = cross_entropy(UpperCamelCase__ , UpperCamelCase__ ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class __lowercase : """simple docstring""" _A : int = """google/bigbird-roberta-base""" _A : int = 3000 _A : Optional[int] = 10500 _A : Optional[Any] = 128 _A : List[Any] = 3 _A : List[str] = 1 _A : Any = 5 # tx_args _A : Optional[Any] = 3e-5 _A : Union[str, Any] = 0.0 _A : int = 20000 _A : List[Any] = 0.0095 _A : Union[str, Any] = """bigbird-roberta-natural-questions""" _A : Optional[int] = """training-expt""" _A : Dict = """data/nq-training.jsonl""" _A : Optional[int] = """data/nq-validation.jsonl""" def __UpperCamelCase (self ): os.makedirs(self.base_dir , exist_ok=__A ) snake_case_ : Optional[int] = os.path.join(self.base_dir , self.save_dir ) snake_case_ : str = self.batch_size_per_device * jax.device_count() @dataclass class __lowercase : """simple docstring""" _A : Tuple = 42 _A : Optional[Any] = 4096 # no dynamic padding on TPUs def __call__(self , lowercase__ ): snake_case_ : Any = self.collate_fn(__A ) snake_case_ : Any = jax.tree_util.tree_map(__A , __A ) return batch def __UpperCamelCase (self , lowercase__ ): snake_case_ , snake_case_ : Tuple = self.fetch_inputs(features["""input_ids"""] ) snake_case_ : Tuple = { """input_ids""": jnp.array(__A , dtype=jnp.intaa ), """attention_mask""": jnp.array(__A , 
dtype=jnp.intaa ), """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.intaa ), """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.intaa ), """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.intaa ), } return batch def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[int] = [self._fetch_inputs(__A ) for ids in input_ids] return zip(*__A ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : Optional[int] = [1 for _ in range(len(__A ) )] while len(__A ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int=None ): """simple docstring""" if seed is not None: snake_case_ : Any = dataset.shuffle(seed=UpperCamelCase__ ) for i in range(len(UpperCamelCase__ ) // batch_size ): snake_case_ : int = dataset[i * batch_size : (i + 1) * batch_size] yield dict(UpperCamelCase__ ) @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" def loss_fn(SCREAMING_SNAKE_CASE__ : Any ): snake_case_ : Union[str, Any] = model_inputs.pop("""start_labels""" ) snake_case_ : Dict = model_inputs.pop("""end_labels""" ) snake_case_ : int = model_inputs.pop("""pooled_labels""" ) snake_case_ : Union[str, Any] = state.apply_fn(**UpperCamelCase__ , params=UpperCamelCase__ , dropout_rng=UpperCamelCase__ , train=UpperCamelCase__ ) snake_case_ , snake_case_ , snake_case_ : Dict = outputs return state.loss_fn( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) snake_case_ , snake_case_ : List[Any] = jax.random.split(UpperCamelCase__ ) snake_case_ : Optional[int] = jax.value_and_grad(UpperCamelCase__ ) snake_case_ , snake_case_ : Any = grad_fn(state.params ) snake_case_ : List[Any] = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) snake_case_ : Optional[int] = jax.lax.pmean(UpperCamelCase__ , """batch""" ) snake_case_ : List[Any] = state.apply_gradients(grads=UpperCamelCase__ ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name="""batch""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ): """simple docstring""" snake_case_ : int = model_inputs.pop("""start_labels""" ) snake_case_ : Optional[Any] = model_inputs.pop("""end_labels""" ) snake_case_ : Tuple = model_inputs.pop("""pooled_labels""" ) snake_case_ : Union[str, Any] = state.apply_fn(**UpperCamelCase__ , params=state.params , train=UpperCamelCase__ ) snake_case_ , snake_case_ , snake_case_ : List[Any] = outputs snake_case_ : Any = state.loss_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ : int = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" ) return metrics class __lowercase ( train_state.TrainState): """simple docstring""" _A : List[Any] = struct.field(pytree_node=UpperCamelCase__) @dataclass class __lowercase : """simple docstring""" _A : Union[str, Any] = 42 _A : List[str] = 42 _A : Optional[Any] = 42 _A : List[Any] = 42 _A : Optional[int] = 42 _A : int = 42 _A : Optional[Any] = None def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ): snake_case_ : Optional[int] = model.params snake_case_ : Any = 
TrainState.create( apply_fn=model.__call__ , params=__A , tx=__A , loss_fn=__A , ) if ckpt_dir is not None: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = restore_checkpoint(__A , __A ) snake_case_ : Any = { """lr""": args.lr, """init_lr""": args.init_lr, """warmup_steps""": args.warmup_steps, """num_train_steps""": num_train_steps, """weight_decay""": args.weight_decay, } snake_case_ , snake_case_ : Any = build_tx(**__A ) snake_case_ : Optional[Any] = train_state.TrainState( step=__A , apply_fn=model.__call__ , params=__A , tx=__A , opt_state=__A , ) snake_case_ : List[str] = args snake_case_ : int = data_collator snake_case_ : Optional[int] = lr snake_case_ : Any = params snake_case_ : Tuple = jax_utils.replicate(__A ) return state def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ ): snake_case_ : str = self.args snake_case_ : Any = len(__A ) // args.batch_size snake_case_ : Tuple = jax.random.PRNGKey(0 ) snake_case_ : Optional[Any] = jax.random.split(__A , jax.device_count() ) for epoch in range(args.max_epochs ): snake_case_ : Dict = jnp.array(0 , dtype=jnp.floataa ) snake_case_ : str = get_batched_dataset(__A , args.batch_size , seed=__A ) snake_case_ : Optional[Any] = 0 for batch in tqdm(__A , total=__A , desc=f'Running EPOCH-{epoch}' ): snake_case_ : List[Any] = self.data_collator(__A ) snake_case_ , snake_case_ , snake_case_ : Dict = self.train_step_fn(__A , __A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 if i % args.logging_steps == 0: snake_case_ : Optional[int] = jax_utils.unreplicate(state.step ) snake_case_ : Dict = running_loss.item() / i snake_case_ : List[str] = self.scheduler_fn(state_step - 1 ) snake_case_ : List[str] = self.evaluate(__A , __A ) snake_case_ : Dict = { """step""": state_step.item(), """eval_loss""": eval_loss.item(), """tr_loss""": tr_loss, """lr""": lr.item(), } tqdm.write(str(__A ) ) self.logger.log(__A , commit=__A ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=__A ) def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : Union[str, Any] = get_batched_dataset(__A , self.args.batch_size ) snake_case_ : Tuple = len(__A ) // self.args.batch_size snake_case_ : Optional[int] = jnp.array(0 , dtype=jnp.floataa ) snake_case_ : Tuple = 0 for batch in tqdm(__A , total=__A , desc="""Evaluating ... """ ): snake_case_ : List[Any] = self.data_collator(__A ) snake_case_ : Tuple = self.val_step_fn(__A , **__A ) running_loss += jax_utils.unreplicate(metrics["""loss"""] ) i += 1 return running_loss / i def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : List[Any] = jax_utils.unreplicate(__A ) print(f'SAVING CHECKPOINT IN {save_dir}' , end=""" ... """ ) self.model_save_fn(__A , params=state.params ) with open(os.path.join(__A , """opt_state.msgpack""" ) , """wb""" ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(__A , """args.joblib""" ) ) joblib.dump(self.data_collator , os.path.join(__A , """data_collator.joblib""" ) ) with open(os.path.join(__A , """training_state.json""" ) , """w""" ) as f: json.dump({"""step""": state.step.item()} , __A ) print("""DONE""" ) def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ): """simple docstring""" print(f'RESTORING CHECKPOINT FROM {save_dir}' , end=""" ... 
""" ) with open(os.path.join(UpperCamelCase__ , """flax_model.msgpack""" ) , """rb""" ) as f: snake_case_ : Any = from_bytes(state.params , f.read() ) with open(os.path.join(UpperCamelCase__ , """opt_state.msgpack""" ) , """rb""" ) as f: snake_case_ : List[str] = from_bytes(state.opt_state , f.read() ) snake_case_ : Optional[int] = joblib.load(os.path.join(UpperCamelCase__ , """args.joblib""" ) ) snake_case_ : List[str] = joblib.load(os.path.join(UpperCamelCase__ , """data_collator.joblib""" ) ) with open(os.path.join(UpperCamelCase__ , """training_state.json""" ) , """r""" ) as f: snake_case_ : Dict = json.load(UpperCamelCase__ ) snake_case_ : Optional[Any] = training_state["""step"""] print("""DONE""" ) return params, opt_state, step, args, data_collator def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ): """simple docstring""" snake_case_ : Optional[int] = num_train_steps - warmup_steps snake_case_ : List[Any] = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=UpperCamelCase__ , transition_steps=UpperCamelCase__ ) snake_case_ : Dict = optax.linear_schedule(init_value=UpperCamelCase__ , end_value=1E-7 , transition_steps=UpperCamelCase__ ) snake_case_ : List[str] = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): """simple docstring""" def weight_decay_mask(SCREAMING_SNAKE_CASE__ : Optional[Any] ): snake_case_ : Union[str, Any] = traverse_util.flatten_dict(UpperCamelCase__ ) snake_case_ : Union[str, Any] = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()} return traverse_util.unflatten_dict(UpperCamelCase__ ) snake_case_ : Tuple = scheduler_fn(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) snake_case_ : Any = optax.adamw(learning_rate=UpperCamelCase__ , weight_decay=UpperCamelCase__ , mask=UpperCamelCase__ ) return tx, lr
714
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin a_ = get_tests_dir('''fixtures/test_sentencepiece.model''') a_ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} a_ = '''>>zh<<''' a_ = '''Helsinki-NLP/''' if is_torch_available(): a_ = '''pt''' elif is_tf_available(): a_ = '''tf''' else: a_ = '''jax''' @require_sentencepiece class __lowercase ( _UpperCAmelCase , unittest.TestCase): """simple docstring""" _A : str = MarianTokenizer _A : List[str] = False _A : List[str] = True def __UpperCamelCase (self ): super().setUp() snake_case_ : Optional[int] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""] snake_case_ : Any = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) snake_case_ : Any = Path(self.tmpdirname ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] ) save_json(lowercase__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] ) copyfile(lowercase__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] ) snake_case_ : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCamelCase (self , **lowercase__ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase__ ) def __UpperCamelCase (self , lowercase__ ): return ( "This is a test", "This is a test", ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = """</s>""" snake_case_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) , lowercase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) , lowercase__ ) def __UpperCamelCase (self ): snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """</s>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(lowercase__ ) , 9 ) def __UpperCamelCase (self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained(f'{ORG_NAME}opus-mt-en-de' ) snake_case_ : Tuple = en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) snake_case_ : Dict = [38, 1_21, 14, 6_97, 3_88_48, 0] self.assertListEqual(lowercase__ , batch.input_ids[0] ) snake_case_ : Tuple = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase__ ) snake_case_ : str = [x.name for x in Path(lowercase__ ).glob("""*""" )] self.assertIn("""source.spm""" , lowercase__ ) MarianTokenizer.from_pretrained(lowercase__ ) def __UpperCamelCase (self ): snake_case_ : Union[str, Any] = self.get_tokenizer() snake_case_ : List[str] = tok( ["""I am a small frog""" * 10_00, """I am a small frog"""] , padding=lowercase__ , truncation=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) 
self.assertEqual(batch.input_ids.shape , (2, 5_12) ) def __UpperCamelCase (self ): snake_case_ : Tuple = self.get_tokenizer() snake_case_ : Tuple = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowercase__ , return_tensors=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def __UpperCamelCase (self ): # fmt: off snake_case_ : str = {"""input_ids""": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , ) def __UpperCamelCase (self ): snake_case_ : Any = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" ) snake_case_ : Dict = """Tämä on testi""" snake_case_ : List[Any] = """This is a test""" snake_case_ : Optional[int] = [76, 7, 20_47, 2] snake_case_ : List[str] = [69, 12, 11, 9_40, 2] snake_case_ : Any = tokenizer(lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : str = tokenizer(text_target=lowercase__ ).input_ids self.assertListEqual(lowercase__ , lowercase__ ) snake_case_ : int = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ )
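A short usage sketch of the two-vocab Marian setup exercised by the last test (the model name is taken from the test above; the `text_target` behavior is the documented source/target vocabulary switch):

from transformers import MarianTokenizer

tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")
# source text is encoded with the source-language vocabulary...
inputs = tokenizer("Tämä on testi")
# ...while text_target encodes with the target-language vocabulary
labels = tokenizer(text_target="This is a test")
print(inputs.input_ids, labels.input_ids)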
48
0
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ): """simple docstring""" snake_case_ , snake_case_ : Any = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_UpperCAmelCase ): for j in range(_UpperCAmelCase ): snake_case_ : int = [2_5_5, 2_5_5, 2_5_5] - img[i][j] return img if __name__ == "__main__": # read original image a_ = imread('''image_data/lena.jpg''', 1) # convert to its negative a_ = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
715
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=_UpperCAmelCase) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : str = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True}) _A : ClassVar[Features] = Features({"""audio""": Audio()}) _A : ClassVar[Features] = Features({"""transcription""": Value("""string""")}) _A : str = "audio" _A : str = "transcription" def __UpperCamelCase (self , lowercase__ ): if self.audio_column not in features: raise ValueError(f'Column {self.audio_column} is not present in features.' ) if not isinstance(features[self.audio_column] , lowercase__ ): raise ValueError(f'Column {self.audio_column} is not an Audio type.' ) snake_case_ : Optional[int] = copy.deepcopy(self ) snake_case_ : Tuple = self.input_schema.copy() snake_case_ : List[str] = features[self.audio_column] snake_case_ : Any = input_schema return task_template @property def __UpperCamelCase (self ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
48
0
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu a_ = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: a_ = json.load(f) @require_torch class __lowercase ( unittest.TestCase): """simple docstring""" def __UpperCamelCase (self , lowercase__ ): return FSMTTokenizer.from_pretrained(__a ) def __UpperCamelCase (self , lowercase__ ): snake_case_ : str = FSMTForConditionalGeneration.from_pretrained(__a ).to(__a ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ["""en-ru""", 26.0], ["""ru-en""", 22.0], ["""en-de""", 22.0], ["""de-en""", 29.0], ] ) @slow def __UpperCamelCase (self , lowercase__ , lowercase__ ): snake_case_ : str = f'facebook/wmt19-{pair}' snake_case_ : int = self.get_tokenizer(__a ) snake_case_ : Tuple = self.get_model(__a ) snake_case_ : Tuple = bleu_data[pair]['src'] snake_case_ : Union[str, Any] = bleu_data[pair]['tgt'] snake_case_ : List[str] = tokenizer(__a , return_tensors="""pt""" , truncation=__a , padding="""longest""" ).to(__a ) snake_case_ : Dict = model.generate( input_ids=batch.input_ids , num_beams=8 , ) snake_case_ : Any = tokenizer.batch_decode( __a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Tuple = calculate_bleu(__a , __a ) print(__a ) self.assertGreaterEqual(scores["""bleu"""] , __a )
716
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL a_ = logging.get_logger(__name__) class __lowercase ( _UpperCAmelCase): """simple docstring""" _A : int = ["""pixel_values"""] def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ): super().__init__(**lowercase__ ) snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24} snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[str] = size snake_case_ : str = crop_pct snake_case_ : str = resample snake_case_ : Optional[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : int = do_rescale snake_case_ : Optional[int] = rescale_factor snake_case_ : str = do_normalize snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ): snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) if crop_pct is not None: if "shortest_edge" in size: snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: snake_case_ : Dict = int(size["""height"""] / crop_pct ) else: snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct )) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ ) else: if "shortest_edge" in size: snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ ) elif "height" in size and "width" in size: snake_case_ : int = (size["""height"""], size["""width"""]) else: raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) ) return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): snake_case_ : int = get_size_dict(lowercase__ ) if "height" not in size or "width" not in size: raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ): return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ ) def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ): snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct snake_case_ : List[Any] = resample if resample is not None else self.resample snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean snake_case_ : int = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ ) snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" ) snake_case_ : List[str] = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_pct is None: raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images] if do_center_crop: snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images] if do_rescale: snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images] if do_normalize: snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images] snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] snake_case_ : Dict = {"""pixel_values""": images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
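A quick usage sketch for the restored processor above (the class name is reconstructed from the PoolFormer-style defaults; the dummy image is arbitrary):

import numpy as np

processor = PoolFormerImageProcessor()
dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # HWC uint8 image
batch = processor.preprocess(dummy, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224) with the default 224 crop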
48
0